var/home/core/zuul-output/0000755000175000017500000000000015071032066014525 5ustar corecorevar/home/core/zuul-output/logs/0000755000175000017500000000000015071046321015470 5ustar corecorevar/home/core/zuul-output/logs/kubelet.log0000644000000000000000005315753315071046312017710 0ustar rootrootOct 06 21:30:53 crc systemd[1]: Starting Kubernetes Kubelet... Oct 06 21:30:53 crc restorecon[4724]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c225,c458 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c138,c778 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 
Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c108,c511 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 06 21:30:53 crc 
restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 06 21:30:53 crc 
restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:53 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc 
restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc 
restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 06 21:30:54 
crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 
21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 
21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 06 21:30:54 crc 
restorecon[4724]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 
21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 
21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc 
restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 06 21:30:54 crc restorecon[4724]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 06 21:30:54 crc restorecon[4724]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Oct 06 21:30:55 crc kubenswrapper[5014]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Oct 06 21:30:55 crc kubenswrapper[5014]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Oct 06 21:30:55 crc kubenswrapper[5014]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Oct 06 21:30:55 crc kubenswrapper[5014]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Oct 06 21:30:55 crc kubenswrapper[5014]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Oct 06 21:30:55 crc kubenswrapper[5014]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.204660 5014 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.214075 5014 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.214256 5014 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.214359 5014 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.214451 5014 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.214558 5014 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.214748 5014 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.214860 5014 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.214955 5014 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.215045 5014 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.215155 5014 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.215250 5014 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.215340 5014 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.215429 5014 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.215518 5014 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.215764 5014 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.215985 5014 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.216093 5014 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.216188 5014 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.216278 5014 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.216367 5014 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.216455 5014 feature_gate.go:330] unrecognized feature gate: Example
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.216543 5014 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.216661 5014 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.216776 5014 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.216870 5014 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.216969 5014 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.217077 5014 feature_gate.go:330] unrecognized feature gate: NewOLM
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.217178 5014 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.217279 5014 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.217372 5014 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.217476 5014 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.217576 5014 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.217710 5014 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.217832 5014 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.217929 5014 feature_gate.go:330] unrecognized feature gate: PinnedImages
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.218020 5014 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.218110 5014 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.218220 5014 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.218318 5014 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.218411 5014 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.218502 5014 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.218592 5014 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.218734 5014 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.218832 5014 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.218943 5014 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.219049 5014 feature_gate.go:330] unrecognized feature gate: SignatureStores
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.219144 5014 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.219234 5014 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.219331 5014 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.219431 5014 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.219523 5014 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.219667 5014 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.219774 5014 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.219870 5014 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.219962 5014 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.220053 5014 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.220142 5014 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.220249 5014 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.220344 5014 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.220436 5014 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.220526 5014 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.220615 5014 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.220739 5014 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.220830 5014 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.220936 5014 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.221030 5014 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.221119 5014 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.221211 5014 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.221300 5014 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.221389 5014 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.221478 5014 feature_gate.go:330] unrecognized feature gate: OVNObservability
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.223806 5014 flags.go:64] FLAG: --address="0.0.0.0"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.223952 5014 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224076 5014 flags.go:64] FLAG: --anonymous-auth="true"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224178 5014 flags.go:64] FLAG: --application-metrics-count-limit="100"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224276 5014 flags.go:64] FLAG: --authentication-token-webhook="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224369 5014 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224476 5014 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224583 5014 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224792 5014 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224846 5014 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224860 5014 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224878 5014 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224889 5014 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224900 5014 flags.go:64] FLAG: --cgroup-root=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224909 5014 flags.go:64] FLAG: --cgroups-per-qos="true"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224921 5014 flags.go:64] FLAG: --client-ca-file=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224930 5014 flags.go:64] FLAG: --cloud-config=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224939 5014 flags.go:64] FLAG: --cloud-provider=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224948 5014 flags.go:64] FLAG: --cluster-dns="[]"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224965 5014 flags.go:64] FLAG: --cluster-domain=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224974 5014 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224983 5014 flags.go:64] FLAG: --config-dir=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.224992 5014 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225002 5014 flags.go:64] FLAG: --container-log-max-files="5"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225017 5014 flags.go:64] FLAG: --container-log-max-size="10Mi"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225026 5014 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225037 5014 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225048 5014 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225059 5014 flags.go:64] FLAG: --contention-profiling="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225069 5014 flags.go:64] FLAG: --cpu-cfs-quota="true"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225078 5014 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225088 5014 flags.go:64] FLAG: --cpu-manager-policy="none"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225097 5014 flags.go:64] FLAG: --cpu-manager-policy-options=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225109 5014 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225119 5014 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225128 5014 flags.go:64] FLAG: --enable-debugging-handlers="true"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225136 5014 flags.go:64] FLAG: --enable-load-reader="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225146 5014 flags.go:64] FLAG: --enable-server="true"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225155 5014 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225167 5014 flags.go:64] FLAG: --event-burst="100"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225177 5014 flags.go:64] FLAG: --event-qps="50"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225186 5014 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225195 5014 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225204 5014 flags.go:64] FLAG: --eviction-hard=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225216 5014 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225224 5014 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225233 5014 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225244 5014 flags.go:64] FLAG: --eviction-soft=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225253 5014 flags.go:64] FLAG: --eviction-soft-grace-period=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225262 5014 flags.go:64] FLAG: --exit-on-lock-contention="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225271 5014 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225280 5014 flags.go:64] FLAG: --experimental-mounter-path=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225289 5014 flags.go:64] FLAG: --fail-cgroupv1="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225297 5014 flags.go:64] FLAG: --fail-swap-on="true"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225306 5014 flags.go:64] FLAG: --feature-gates=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225317 5014 flags.go:64] FLAG: --file-check-frequency="20s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225326 5014 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225336 5014 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225344 5014 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225355 5014 flags.go:64] FLAG: --healthz-port="10248"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225368 5014 flags.go:64] FLAG: --help="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225377 5014 flags.go:64] FLAG: --hostname-override=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225386 5014 flags.go:64] FLAG: --housekeeping-interval="10s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225395 5014 flags.go:64] FLAG: --http-check-frequency="20s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225404 5014 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225413 5014 flags.go:64] FLAG: --image-credential-provider-config=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225422 5014 flags.go:64] FLAG: --image-gc-high-threshold="85"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225433 5014 flags.go:64] FLAG: --image-gc-low-threshold="80"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225442 5014 flags.go:64] FLAG: --image-service-endpoint=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225454 5014 flags.go:64] FLAG: --kernel-memcg-notification="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225463 5014 flags.go:64] FLAG: --kube-api-burst="100"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225472 5014 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225482 5014 flags.go:64] FLAG: --kube-api-qps="50"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225491 5014 flags.go:64] FLAG: --kube-reserved=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225500 5014 flags.go:64] FLAG: --kube-reserved-cgroup=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225509 5014 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225518 5014 flags.go:64] FLAG: --kubelet-cgroups=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225528 5014 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225537 5014 flags.go:64] FLAG: --lock-file=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225546 5014 flags.go:64] FLAG: --log-cadvisor-usage="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225555 5014 flags.go:64] FLAG: --log-flush-frequency="5s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225565 5014 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225596 5014 flags.go:64] FLAG: --log-json-split-stream="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225615 5014 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225651 5014 flags.go:64] FLAG: --log-text-split-stream="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225661 5014 flags.go:64] FLAG: --logging-format="text"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225670 5014 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225680 5014 flags.go:64] FLAG: --make-iptables-util-chains="true"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225689 5014 flags.go:64] FLAG: --manifest-url=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225698 5014 flags.go:64] FLAG: --manifest-url-header=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225720 5014 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225729 5014 flags.go:64] FLAG: --max-open-files="1000000"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225741 5014 flags.go:64] FLAG: --max-pods="110"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225752 5014 flags.go:64] FLAG: --maximum-dead-containers="-1"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225761 5014 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225771 5014 flags.go:64] FLAG: --memory-manager-policy="None"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225780 5014 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225789 5014 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225798 5014 flags.go:64] FLAG: --node-ip="192.168.126.11"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225807 5014 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225832 5014 flags.go:64] FLAG: --node-status-max-images="50"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225843 5014 flags.go:64] FLAG: --node-status-update-frequency="10s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225852 5014 flags.go:64] FLAG: --oom-score-adj="-999"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225862 5014 flags.go:64] FLAG: --pod-cidr=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225871 5014 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225885 5014 flags.go:64] FLAG: --pod-manifest-path=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225894 5014 flags.go:64] FLAG: --pod-max-pids="-1"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225904 5014 flags.go:64] FLAG: --pods-per-core="0"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225913 5014 flags.go:64] FLAG: --port="10250"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225922 5014 flags.go:64] FLAG: --protect-kernel-defaults="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225931 5014 flags.go:64] FLAG: --provider-id=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225940 5014 flags.go:64] FLAG: --qos-reserved=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225949 5014 flags.go:64] FLAG: --read-only-port="10255"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225958 5014 flags.go:64] FLAG: --register-node="true"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225967 5014 flags.go:64] FLAG: --register-schedulable="true"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225976 5014 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.225992 5014 flags.go:64] FLAG: --registry-burst="10"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226001 5014 flags.go:64] FLAG: --registry-qps="5"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226010 5014 flags.go:64] FLAG: --reserved-cpus=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226020 5014 flags.go:64] FLAG: --reserved-memory=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226031 5014 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226041 5014 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226050 5014 flags.go:64] FLAG: --rotate-certificates="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226059 5014 flags.go:64] FLAG: --rotate-server-certificates="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226068 5014 flags.go:64] FLAG: --runonce="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226077 5014 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226086 5014 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226096 5014 flags.go:64] FLAG: --seccomp-default="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226105 5014 flags.go:64] FLAG: --serialize-image-pulls="true"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226114 5014 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226123 5014 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226132 5014 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226141 5014 flags.go:64] FLAG: --storage-driver-password="root"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226150 5014 flags.go:64] FLAG: --storage-driver-secure="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226158 5014 flags.go:64] FLAG: --storage-driver-table="stats"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226167 5014 flags.go:64] FLAG: --storage-driver-user="root"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226175 5014 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226185 5014 flags.go:64] FLAG: --sync-frequency="1m0s"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226194 5014 flags.go:64] FLAG: --system-cgroups=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226203 5014 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226219 5014 flags.go:64] FLAG: --system-reserved-cgroup=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226228 5014 flags.go:64] FLAG: --tls-cert-file=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226237 5014 flags.go:64] FLAG: --tls-cipher-suites="[]"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226249 5014 flags.go:64] FLAG: --tls-min-version=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226258 5014 flags.go:64] FLAG: --tls-private-key-file=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226267 5014 flags.go:64] FLAG: --topology-manager-policy="none"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226275 5014 flags.go:64] FLAG: --topology-manager-policy-options=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226284 5014 flags.go:64] FLAG: --topology-manager-scope="container"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226293 5014 flags.go:64] FLAG: --v="2"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226305 5014 flags.go:64] FLAG: --version="false"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226317 5014 flags.go:64] FLAG: --vmodule=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226328 5014 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.226337 5014 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
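Several of the flags above, for example --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi", encode a resource list as comma-separated key=value pairs. A small self-contained Go sketch of parsing that format (illustrative only; the kubelet's own parser additionally validates resource names and quantities):

// sketch: parse a comma-separated key=value flag value into a map.
package main

import (
	"fmt"
	"strings"
)

// parseKeyValueList splits "k1=v1,k2=v2,..." into a map, rejecting
// any pair that lacks an '=' separator. strings.Cut needs Go 1.18+.
func parseKeyValueList(s string) (map[string]string, error) {
	out := map[string]string{}
	for _, pair := range strings.Split(s, ",") {
		k, v, ok := strings.Cut(pair, "=")
		if !ok {
			return nil, fmt.Errorf("malformed pair %q", pair)
		}
		out[strings.TrimSpace(k)] = strings.TrimSpace(v)
	}
	return out, nil
}

func main() {
	// the --system-reserved value logged above
	m, err := parseKeyValueList("cpu=200m,ephemeral-storage=350Mi,memory=350Mi")
	if err != nil {
		panic(err)
	}
	fmt.Println(m) // map[cpu:200m ephemeral-storage:350Mi memory:350Mi]
}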
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.227257 5014 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.240673 5014 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.240720 5014 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
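The flood of "unrecognized feature gate" warnings above, followed by the effective gate map at feature_gate.go:386, suggests that OpenShift hands its full set of cluster gate names to a kubelet that only registers the upstream Kubernetes ones: unknown names are warned about and skipped, known names end up in the effective map. A minimal Go sketch of that filtering behaviour, with a hypothetical known-gate subset standing in for the kubelet's real registry:

// sketch: filter requested feature gates against a registered set.
package main

import "fmt"

func main() {
	// hypothetical subset of gates the kubelet registers; the real
	// list lives in the Kubernetes feature-gate packages.
	known := map[string]bool{
		"CloudDualStackNodeIPs":                  true,
		"DisableKubeletCloudCredentialProviders": true,
		"KMSv1":                                  true,
		"ValidatingAdmissionPolicy":              true,
	}
	// mix of upstream and OpenShift-only names, as in the log above
	requested := map[string]bool{
		"CloudDualStackNodeIPs": true,
		"GatewayAPI":            true, // OpenShift gate, unknown to the kubelet
		"KMSv1":                 true,
	}
	effective := map[string]bool{}
	for name, enabled := range requested {
		if !known[name] {
			// mirrors the W ... "unrecognized feature gate" lines above
			fmt.Printf("W unrecognized feature gate: %s\n", name)
			continue
		}
		effective[name] = enabled
	}
	// mirrors the I ... "feature gates: {map[...]}" summary line
	fmt.Printf("feature gates: %v\n", effective)
}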
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.242525 5014 server.go:940] "Client rotation is on, will bootstrap in background"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.249144 5014 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.249280 5014 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
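The certificate_manager entries that follow derive a rotation schedule from the client certificate's expiry (expiration 2026-02-24, rotation deadline 2025-11-18, hence the roughly 1019h wait). A hedged Go sketch of just the raw expiry arithmetic, assuming the certificate is the first PEM block in kubelet-client-current.pem; the real certificate manager additionally picks a jittered deadline inside the validity window rather than waiting for the expiry itself:

// sketch: read the current kubelet client cert and report time to expiry.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("/var/lib/kubelet/pki/kubelet-client-current.pem")
	if err != nil {
		panic(err)
	}
	// assumption: the certificate precedes the private key in this file,
	// so the first PEM block is the certificate.
	block, _ := pem.Decode(data)
	if block == nil || block.Type != "CERTIFICATE" {
		panic("no certificate block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	fmt.Printf("expires %s, %s remaining\n",
		cert.NotAfter.UTC(), time.Until(cert.NotAfter).Round(time.Second))
}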
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.251172    5014 server.go:997] "Starting client certificate rotation"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.251221    5014 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.253335    5014 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-18 08:37:30.281891177 +0000 UTC
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.253432    5014 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 1019h6m35.028464703s for next certificate rotation
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.281345    5014 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.287414    5014 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.301813    5014 log.go:25] "Validated CRI v1 runtime API"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.342572    5014 log.go:25] "Validated CRI v1 image API"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.344969    5014 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.350861    5014 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-10-06-21-26-16-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.350911    5014 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.382016    5014 manager.go:217] Machine: {Timestamp:2025-10-06 21:30:55.378337195 +0000 UTC m=+0.671374009 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799886 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:d96e7155-4816-42b4-95d2-01738aa57d05 BootID:d3fb3b65-6228-438f-aaa3-137984163b6d Filesystems:[{Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:23:3d:f0 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:23:3d:f0 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:bd:ab:8e Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:55:5d:56 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:87:25:e3 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:17:16:88 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:33:b4:76 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:96:69:84:b7:9c:7b Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:2e:10:7e:1c:2c:b4 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.382449    5014 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.382910    5014 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.385006    5014 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.385343    5014 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.385406    5014 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.385743    5014 topology_manager.go:138] "Creating topology manager with none policy"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.385762    5014 container_manager_linux.go:303] "Creating device plugin manager"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.386699    5014 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.386750    5014 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.387047 5014 state_mem.go:36] "Initialized new in-memory state store" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.387180 5014 server.go:1245] "Using root directory" path="/var/lib/kubelet" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.392361 5014 kubelet.go:418] "Attempting to sync node with API server" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.392410 5014 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.392452 5014 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.392481 5014 kubelet.go:324] "Adding apiserver pod source" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.392506 5014 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.397380 5014 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.399060 5014 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.400811 5014 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.403006 5014 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.403050 5014 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.403068 5014 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.403081 5014 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.403103 5014 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.403116 5014 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.403130 5014 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.403152 5014 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.403167 5014 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.403180 5014 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.403225 5014 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.403242 5014 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.403759 5014 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: 
connection refused Oct 06 21:30:55 crc kubenswrapper[5014]: E1006 21:30:55.403869 5014 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.403873 5014 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Oct 06 21:30:55 crc kubenswrapper[5014]: E1006 21:30:55.404017 5014 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.403951 5014 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.404947 5014 server.go:1280] "Started kubelet" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.406863 5014 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Oct 06 21:30:55 crc systemd[1]: Started Kubernetes Kubelet. Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.406875 5014 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.408724 5014 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.409827 5014 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.409891 5014 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.409933 5014 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 11:03:13.401744834 +0000 UTC Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.410104 5014 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1813h32m17.991650115s for next certificate rotation Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.410387 5014 volume_manager.go:287] "The desired_state_of_world populator starts" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.410437 5014 volume_manager.go:289] "Starting Kubelet Volume Manager" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.410861 5014 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.410804 5014 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.411310 5014 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Oct 06 21:30:55 crc kubenswrapper[5014]: E1006 21:30:55.411737 5014 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Oct 06 21:30:55 crc kubenswrapper[5014]: E1006 21:30:55.412031 5014 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Oct 06 21:30:55 crc kubenswrapper[5014]: E1006 21:30:55.420777 5014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="200ms" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.420915 5014 factory.go:153] Registering CRI-O factory Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.420965 5014 factory.go:221] Registration of the crio container factory successfully Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.421774 5014 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.421861 5014 factory.go:55] Registering systemd factory Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.421922 5014 factory.go:221] Registration of the systemd container factory successfully Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.422150 5014 factory.go:103] Registering Raw factory Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.422541 5014 manager.go:1196] Started watching for new ooms in manager Oct 06 21:30:55 crc kubenswrapper[5014]: E1006 21:30:55.421871 5014 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.20:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.186c04468a061871 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-10-06 21:30:55.404898417 +0000 UTC m=+0.697935191,LastTimestamp:2025-10-06 21:30:55.404898417 +0000 UTC m=+0.697935191,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.428403 5014 manager.go:319] Starting recovery of all containers Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.428782 5014 server.go:460] "Adding debug handlers to kubelet server" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435333 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Oct 06 21:30:55 crc 
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435440    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435470    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435499    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435524    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435548    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435575    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435656    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435693    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435719    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435743    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435768    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435793    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435821    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435844    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435866    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435931    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435956    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.435980    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436033    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436071    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436099    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436163    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436190    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436218    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436244    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436278    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436305    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436332    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436359    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436384    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436409    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436433    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436458    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436484    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436510    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436536    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436561    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436596    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436654    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436681    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436705    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436728    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436750    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436776    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436798    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436826    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436851    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436879    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436904    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436931    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436955    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.436992    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437023    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437052    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437080    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437111    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437139    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437180    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437206    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437232    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437257    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437293    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437317    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437344    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437369    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437446    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437469    5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437488 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437508 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437530 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437593 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437615 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437670 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437689 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437707 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437727 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437746 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437764 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" 
seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437781 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437799 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437826 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437844 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437863 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437882 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437927 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437947 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437966 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.437983 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438001 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Oct 06 21:30:55 crc 
kubenswrapper[5014]: I1006 21:30:55.438020 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438037 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438056 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438075 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438093 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438111 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438130 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438148 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438168 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438186 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438205 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: 
I1006 21:30:55.438226 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438246 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438265 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438293 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438315 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438336 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438358 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438381 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438403 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438423 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438442 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Oct 06 21:30:55 crc 
kubenswrapper[5014]: I1006 21:30:55.438500 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438568 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438599 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438645 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438688 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438705 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438722 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438739 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438756 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438813 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438832 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 
21:30:55.438849 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438935 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438957 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438974 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.438991 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439010 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439027 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439046 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439072 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439201 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439218 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439236 5014 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439254 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439272 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439289 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439307 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439324 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439370 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439409 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439427 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439445 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439463 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439479 5014 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439497 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439517 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439643 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439666 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439687 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439766 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439789 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439837 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439855 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439874 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439893 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439910 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439927 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439945 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439965 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.439983 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440008 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440027 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440044 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440061 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440134 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440154 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440266 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440284 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440301 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440317 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440337 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440357 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440374 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440391 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440407 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440425 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440444 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440462 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440479 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440499 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440517 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440537 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440564 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440591 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440611 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440654 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440674 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.440700 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.446365 5014 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.446447 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.446486 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.446520 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.446549 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.446579 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.446607 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.446672 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.446701 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.446729 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.446794 5014 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.446822 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.446847 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.446874 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.446905 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.447578 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.447645 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.447674 5014 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.447702 5014 reconstruct.go:97] "Volume reconstruction finished" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.447718 5014 reconciler.go:26] "Reconciler: start to sync state" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.457383 5014 manager.go:324] Recovery completed Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.479411 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.480953 5014 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv4" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.481343 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.481414 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.481440 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.482872 5014 cpu_manager.go:225] "Starting CPU manager" policy="none" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.482908 5014 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.482966 5014 state_mem.go:36] "Initialized new in-memory state store" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.483090 5014 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.483141 5014 status_manager.go:217] "Starting to sync pod status with apiserver" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.483175 5014 kubelet.go:2335] "Starting kubelet main sync loop" Oct 06 21:30:55 crc kubenswrapper[5014]: E1006 21:30:55.483243 5014 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.484121 5014 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Oct 06 21:30:55 crc kubenswrapper[5014]: E1006 21:30:55.484210 5014 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.496255 5014 policy_none.go:49] "None policy: Start" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.497352 5014 memory_manager.go:170] "Starting memorymanager" policy="None" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.497580 5014 state_mem.go:35] "Initializing new in-memory state store" Oct 06 21:30:55 crc kubenswrapper[5014]: E1006 21:30:55.512760 5014 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.564032 5014 manager.go:334] "Starting Device Plugin manager" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.564301 5014 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.564326 5014 server.go:79] "Starting device plugin registration server" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.564804 5014 eviction_manager.go:189] "Eviction manager: starting control loop" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.564825 5014 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Oct 06 
21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.565032 5014 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.565218 5014 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.565241 5014 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Oct 06 21:30:55 crc kubenswrapper[5014]: E1006 21:30:55.574587 5014 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.583346 5014 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.583464 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.585114 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.585167 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.585190 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.585430 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.585708 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.585768 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.586934 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.587011 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.587037 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.587234 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.587285 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.587302 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.587358 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.587511 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.587560 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.589323 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.589401 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.589428 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.589339 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.589523 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.589553 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.589839 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.590019 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.590127 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.591297 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.591356 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.591382 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.591611 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.591870 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.591941 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.591963 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.591944 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.592118 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.592885 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.592940 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.592966 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.593263 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.593375 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.593269 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.593464 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.593482 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.595036 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.595097 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.595120 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:55 crc kubenswrapper[5014]: E1006 21:30:55.621925 5014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="400ms" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.649423 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.649598 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.649795 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: 
\"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.649888 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.649990 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.650086 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.650203 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.650311 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.650432 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.650532 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.650677 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.650786 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.650886 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.651060 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.651112 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.665312 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.666651 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.666694 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.666712 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.666746 5014 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 06 21:30:55 crc kubenswrapper[5014]: E1006 21:30:55.667392 5014 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.20:6443: connect: connection refused" node="crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752135 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752206 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752249 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752284 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752319 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752349 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752352 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752417 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752443 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752439 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752380 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752598 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752473 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752671 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752479 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752469 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752719 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752747 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752756 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752817 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752714 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752846 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752750 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: 
\"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752873 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752901 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752915 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.752960 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.753009 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.753096 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.753155 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.868125 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.869922 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.869984 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.870003 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.870040 5014 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 06 21:30:55 crc kubenswrapper[5014]: E1006 21:30:55.870536 5014 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.932518 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.955931 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.976262 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.980848 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-6d74f868740ced8eba478ec10c4cadf1285c4f00f3b5b063fdf2a05919585d4a WatchSource:0}: Error finding container 6d74f868740ced8eba478ec10c4cadf1285c4f00f3b5b063fdf2a05919585d4a: Status 404 returned error can't find the container with id 6d74f868740ced8eba478ec10c4cadf1285c4f00f3b5b063fdf2a05919585d4a
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.990112 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Oct 06 21:30:55 crc kubenswrapper[5014]: W1006 21:30:55.992335 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-409be5129130e5a6f401217f56929381e207b146c16533850a9d754ba685380d WatchSource:0}: Error finding container 409be5129130e5a6f401217f56929381e207b146c16533850a9d754ba685380d: Status 404 returned error can't find the container with id 409be5129130e5a6f401217f56929381e207b146c16533850a9d754ba685380d
Oct 06 21:30:55 crc kubenswrapper[5014]: I1006 21:30:55.997946 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Oct 06 21:30:56 crc kubenswrapper[5014]: W1006 21:30:56.005301 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-60ae77df3f33541fc2273f57eed650a9cd74e10011150140fd04afafda020093 WatchSource:0}: Error finding container 60ae77df3f33541fc2273f57eed650a9cd74e10011150140fd04afafda020093: Status 404 returned error can't find the container with id 60ae77df3f33541fc2273f57eed650a9cd74e10011150140fd04afafda020093
Oct 06 21:30:56 crc kubenswrapper[5014]: W1006 21:30:56.018190 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-194eef9ea42bcf110fb2e8aff016b7ea760104dd92c798115867c1c155d45cd4 WatchSource:0}: Error finding container 194eef9ea42bcf110fb2e8aff016b7ea760104dd92c798115867c1c155d45cd4: Status 404 returned error can't find the container with id 194eef9ea42bcf110fb2e8aff016b7ea760104dd92c798115867c1c155d45cd4
Oct 06 21:30:56 crc kubenswrapper[5014]: E1006 21:30:56.023290 5014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="800ms"
Oct 06 21:30:56 crc kubenswrapper[5014]: W1006 21:30:56.055197 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-d795bc34a09a1bc76e569086b9ec40ecd1ff427169507421cbce0011672d0d50 WatchSource:0}: Error finding container d795bc34a09a1bc76e569086b9ec40ecd1ff427169507421cbce0011672d0d50: Status 404 returned error can't find the container with id d795bc34a09a1bc76e569086b9ec40ecd1ff427169507421cbce0011672d0d50
Oct 06 21:30:56 crc kubenswrapper[5014]: I1006 21:30:56.270668 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 06 21:30:56 crc kubenswrapper[5014]: I1006 21:30:56.272161 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:30:56 crc kubenswrapper[5014]: I1006 21:30:56.272220 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:30:56 crc kubenswrapper[5014]: I1006 21:30:56.272238 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:30:56 crc kubenswrapper[5014]: I1006 21:30:56.272271 5014 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Oct 06 21:30:56 crc kubenswrapper[5014]: E1006 21:30:56.272786 5014 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.20:6443: connect: connection refused" node="crc"
Oct 06 21:30:56 crc kubenswrapper[5014]: W1006 21:30:56.322211 5014 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused
Oct 06 21:30:56 crc kubenswrapper[5014]: E1006 21:30:56.322368 5014 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError"
err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Oct 06 21:30:56 crc kubenswrapper[5014]: I1006 21:30:56.408238 5014 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Oct 06 21:30:56 crc kubenswrapper[5014]: W1006 21:30:56.436412 5014 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Oct 06 21:30:56 crc kubenswrapper[5014]: E1006 21:30:56.436524 5014 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Oct 06 21:30:56 crc kubenswrapper[5014]: I1006 21:30:56.488583 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"60ae77df3f33541fc2273f57eed650a9cd74e10011150140fd04afafda020093"} Oct 06 21:30:56 crc kubenswrapper[5014]: I1006 21:30:56.490678 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"409be5129130e5a6f401217f56929381e207b146c16533850a9d754ba685380d"} Oct 06 21:30:56 crc kubenswrapper[5014]: I1006 21:30:56.493169 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"6d74f868740ced8eba478ec10c4cadf1285c4f00f3b5b063fdf2a05919585d4a"} Oct 06 21:30:56 crc kubenswrapper[5014]: I1006 21:30:56.494662 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"d795bc34a09a1bc76e569086b9ec40ecd1ff427169507421cbce0011672d0d50"} Oct 06 21:30:56 crc kubenswrapper[5014]: I1006 21:30:56.495764 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"194eef9ea42bcf110fb2e8aff016b7ea760104dd92c798115867c1c155d45cd4"} Oct 06 21:30:56 crc kubenswrapper[5014]: W1006 21:30:56.739979 5014 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Oct 06 21:30:56 crc kubenswrapper[5014]: E1006 21:30:56.740259 5014 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get 
\"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Oct 06 21:30:56 crc kubenswrapper[5014]: E1006 21:30:56.824159 5014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="1.6s" Oct 06 21:30:56 crc kubenswrapper[5014]: W1006 21:30:56.840673 5014 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Oct 06 21:30:56 crc kubenswrapper[5014]: E1006 21:30:56.840810 5014 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.073494 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.075223 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.075282 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.075300 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.075341 5014 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 06 21:30:57 crc kubenswrapper[5014]: E1006 21:30:57.075893 5014 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.20:6443: connect: connection refused" node="crc" Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.408687 5014 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.503150 5014 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22" exitCode=0 Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.503265 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22"} Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.503368 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.505063 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:57 crc 
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.505130 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.506194 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f"}
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.506235 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3"}
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.506250 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e"}
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.508228 5014 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7" exitCode=0
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.508298 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7"}
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.508357 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.509592 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.509692 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.509719 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.509880 5014 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2" exitCode=0
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.509968 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2"}
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.510014 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.511069 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.511106 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.511151 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.512060 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.512804 5014 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="9497062cd310a2e43ec77ff8cfc3b406613f5e4ff90d676be93045025290e3af" exitCode=0
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.512830 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.512872 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.512889 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.512912 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"9497062cd310a2e43ec77ff8cfc3b406613f5e4ff90d676be93045025290e3af"}
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.512890 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.513583 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.513609 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:30:57 crc kubenswrapper[5014]: I1006 21:30:57.513642 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:30:58 crc kubenswrapper[5014]: W1006 21:30:58.272746 5014 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused
Oct 06 21:30:58 crc kubenswrapper[5014]: E1006 21:30:58.272868 5014 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.408470 5014 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused
Oct 06 21:30:58 crc kubenswrapper[5014]: E1006 21:30:58.425414 5014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.20:6443: connect: connection refused" interval="3.2s"
Oct 06 21:30:58 crc kubenswrapper[5014]: W1006 21:30:58.485065 5014 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused
Oct 06 21:30:58 crc kubenswrapper[5014]: E1006 21:30:58.485215 5014 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.518721 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd"}
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.518785 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099"}
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.518804 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56"}
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.518822 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1"}
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.520744 5014 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313" exitCode=0
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.520828 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313"}
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.521012 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.522690 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.522747 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.522764 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.531084 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.531096 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"dda4a7a92488e6ff8860810aa9163e98bffc9dc8c7ffe77e9e85d577af45202a"}
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.532352 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.532415 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.532433 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.533955 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a852a360b8208306c0db26edde70be0c758128fca997ba3a19cf6a3ceaf31240"}
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.534007 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"81f643a744f6ab40dd2ab7288d53704dabfc29d25088545333b7475bcabfeb79"}
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.534030 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"f1a3264bc2aabb1a0da4bd54eae121a419a29be7608fbd1811766ff00c8e123b"}
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.534161 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.535482 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.535520 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.535535 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.543375 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904"}
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.543490 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.544549 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.544577 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.544590 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:30:58 crc kubenswrapper[5014]: W1006 21:30:58.651801 5014 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused
"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.20:6443: connect: connection refused Oct 06 21:30:58 crc kubenswrapper[5014]: E1006 21:30:58.651884 5014 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.20:6443: connect: connection refused" logger="UnhandledError" Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.676190 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.677317 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.677338 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.677346 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:58 crc kubenswrapper[5014]: I1006 21:30:58.677366 5014 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 06 21:30:58 crc kubenswrapper[5014]: E1006 21:30:58.677731 5014 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.20:6443: connect: connection refused" node="crc" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.101372 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.549008 5014 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78" exitCode=0 Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.549094 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78"} Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.549149 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.550212 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.550301 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.550320 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.553699 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875"} Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.553738 5014 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 06 21:30:59 crc 
kubenswrapper[5014]: I1006 21:30:59.553799 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.553831 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.553882 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.553912 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.555833 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.555857 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.555868 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.556394 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.556446 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.556472 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.556401 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.556527 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.556545 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.556469 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.556646 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.556663 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.656457 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:30:59 crc kubenswrapper[5014]: I1006 21:30:59.805573 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:31:00 crc kubenswrapper[5014]: I1006 21:31:00.141832 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:31:00 crc kubenswrapper[5014]: I1006 21:31:00.561747 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:31:00 crc kubenswrapper[5014]: I1006 21:31:00.561838 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3"} Oct 06 21:31:00 crc kubenswrapper[5014]: I1006 21:31:00.561913 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806"} Oct 06 21:31:00 crc kubenswrapper[5014]: I1006 21:31:00.561932 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab"} Oct 06 21:31:00 crc kubenswrapper[5014]: I1006 21:31:00.562128 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:31:00 crc kubenswrapper[5014]: I1006 21:31:00.564124 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:00 crc kubenswrapper[5014]: I1006 21:31:00.564227 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:00 crc kubenswrapper[5014]: I1006 21:31:00.564250 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:00 crc kubenswrapper[5014]: I1006 21:31:00.565094 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:00 crc kubenswrapper[5014]: I1006 21:31:00.565153 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:00 crc kubenswrapper[5014]: I1006 21:31:00.565172 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.415013 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.425538 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.571576 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9"} Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.571661 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d"} Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.571719 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.571805 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.571834 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.573571 5014 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.573644 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.573652 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.573695 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.573664 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.573755 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.573717 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.573792 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.573810 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.878109 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.879706 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.879760 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.879786 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:01 crc kubenswrapper[5014]: I1006 21:31:01.879828 5014 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 06 21:31:02 crc kubenswrapper[5014]: I1006 21:31:02.574164 5014 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 06 21:31:02 crc kubenswrapper[5014]: I1006 21:31:02.574238 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:31:02 crc kubenswrapper[5014]: I1006 21:31:02.574194 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:31:02 crc kubenswrapper[5014]: I1006 21:31:02.575735 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:02 crc kubenswrapper[5014]: I1006 21:31:02.575788 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:02 crc kubenswrapper[5014]: I1006 21:31:02.575805 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:02 crc kubenswrapper[5014]: I1006 21:31:02.576842 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:02 crc kubenswrapper[5014]: I1006 21:31:02.576911 5014 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:02 crc kubenswrapper[5014]: I1006 21:31:02.576937 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:04 crc kubenswrapper[5014]: I1006 21:31:04.071404 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 06 21:31:04 crc kubenswrapper[5014]: I1006 21:31:04.071766 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:31:04 crc kubenswrapper[5014]: I1006 21:31:04.073452 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:04 crc kubenswrapper[5014]: I1006 21:31:04.073519 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:04 crc kubenswrapper[5014]: I1006 21:31:04.073539 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:04 crc kubenswrapper[5014]: I1006 21:31:04.869360 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 06 21:31:04 crc kubenswrapper[5014]: I1006 21:31:04.869680 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:31:04 crc kubenswrapper[5014]: I1006 21:31:04.871426 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:04 crc kubenswrapper[5014]: I1006 21:31:04.871487 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:04 crc kubenswrapper[5014]: I1006 21:31:04.871517 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:05 crc kubenswrapper[5014]: I1006 21:31:05.562143 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Oct 06 21:31:05 crc kubenswrapper[5014]: I1006 21:31:05.562408 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:31:05 crc kubenswrapper[5014]: I1006 21:31:05.564505 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:05 crc kubenswrapper[5014]: I1006 21:31:05.564569 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:05 crc kubenswrapper[5014]: I1006 21:31:05.564592 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:05 crc kubenswrapper[5014]: E1006 21:31:05.575008 5014 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Oct 06 21:31:05 crc kubenswrapper[5014]: I1006 21:31:05.630236 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Oct 06 21:31:05 crc kubenswrapper[5014]: I1006 21:31:05.630514 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:31:05 crc kubenswrapper[5014]: I1006 21:31:05.632001 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 06 21:31:05 crc kubenswrapper[5014]: I1006 21:31:05.632205 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:05 crc kubenswrapper[5014]: I1006 21:31:05.632339 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:08 crc kubenswrapper[5014]: I1006 21:31:08.447418 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 06 21:31:08 crc kubenswrapper[5014]: I1006 21:31:08.447672 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:31:08 crc kubenswrapper[5014]: I1006 21:31:08.449106 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:08 crc kubenswrapper[5014]: I1006 21:31:08.449153 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:08 crc kubenswrapper[5014]: I1006 21:31:08.449165 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:08 crc kubenswrapper[5014]: I1006 21:31:08.453753 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 06 21:31:08 crc kubenswrapper[5014]: I1006 21:31:08.591912 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:31:08 crc kubenswrapper[5014]: I1006 21:31:08.593036 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:08 crc kubenswrapper[5014]: I1006 21:31:08.593116 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:08 crc kubenswrapper[5014]: I1006 21:31:08.593135 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:09 crc kubenswrapper[5014]: W1006 21:31:09.195737 5014 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout Oct 06 21:31:09 crc kubenswrapper[5014]: I1006 21:31:09.195879 5014 trace.go:236] Trace[1543928061]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (06-Oct-2025 21:30:59.194) (total time: 10001ms): Oct 06 21:31:09 crc kubenswrapper[5014]: Trace[1543928061]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (21:31:09.195) Oct 06 21:31:09 crc kubenswrapper[5014]: Trace[1543928061]: [10.00155978s] [10.00155978s] END Oct 06 21:31:09 crc kubenswrapper[5014]: E1006 21:31:09.195912 5014 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Oct 06 21:31:09 crc kubenswrapper[5014]: I1006 21:31:09.286711 5014 patch_prober.go:28] interesting 
Oct 06 21:31:09 crc kubenswrapper[5014]: I1006 21:31:09.286793 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Oct 06 21:31:09 crc kubenswrapper[5014]: I1006 21:31:09.293321 5014 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Oct 06 21:31:09 crc kubenswrapper[5014]: I1006 21:31:09.293392 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Oct 06 21:31:09 crc kubenswrapper[5014]: I1006 21:31:09.806578 5014 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Oct 06 21:31:09 crc kubenswrapper[5014]: I1006 21:31:09.806666 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Oct 06 21:31:10 crc kubenswrapper[5014]: I1006 21:31:10.148205 5014 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]log ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]etcd ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/start-apiserver-admission-initializer ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/openshift.io-api-request-count-filter ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/openshift.io-startkubeinformers ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/generic-apiserver-start-informers ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/priority-and-fairness-config-consumer ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/priority-and-fairness-filter ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/storage-object-count-tracker-hook ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/start-apiextensions-informers ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/start-apiextensions-controllers ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/crd-informer-synced ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/start-system-namespaces-controller ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/start-cluster-authentication-info-controller ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/start-legacy-token-tracking-controller ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/start-service-ip-repair-controllers ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld
Oct 06 21:31:10 crc kubenswrapper[5014]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/priority-and-fairness-config-producer ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/bootstrap-controller ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/aggregator-reload-proxy-client-cert ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/start-kube-aggregator-informers ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/apiservice-status-local-available-controller ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/apiservice-status-remote-available-controller ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/apiservice-registration-controller ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/apiservice-wait-for-first-sync ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/apiservice-discovery-controller ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/kube-apiserver-autoregistration ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]autoregister-completion ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/apiservice-openapi-controller ok
Oct 06 21:31:10 crc kubenswrapper[5014]: [+]poststarthook/apiservice-openapiv3-controller ok
Oct 06 21:31:10 crc kubenswrapper[5014]: livez check failed
Oct 06 21:31:10 crc kubenswrapper[5014]: I1006 21:31:10.148365 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 06 21:31:11 crc kubenswrapper[5014]: I1006 21:31:11.448112 5014 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 06 21:31:11 crc kubenswrapper[5014]: I1006 21:31:11.448194 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 06 21:31:13 crc kubenswrapper[5014]: I1006 21:31:13.570129 5014 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.268567 5014 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.270802 5014 trace.go:236] Trace[908853825]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (06-Oct-2025 21:31:03.703) (total time: 10567ms): Oct 06 21:31:14 crc kubenswrapper[5014]: Trace[908853825]: ---"Objects listed" error: 10567ms (21:31:14.270) Oct 06 21:31:14 crc kubenswrapper[5014]: Trace[908853825]: [10.567150422s] [10.567150422s] END Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.270843 5014 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.271323 5014 trace.go:236] Trace[1733863353]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (06-Oct-2025 21:31:01.731) (total time: 12540ms): Oct 06 21:31:14 crc kubenswrapper[5014]: Trace[1733863353]: ---"Objects listed" error: 12540ms (21:31:14.271) Oct 06 21:31:14 crc kubenswrapper[5014]: Trace[1733863353]: [12.540145452s] [12.540145452s] END Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.271360 5014 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.271698 5014 trace.go:236] Trace[950299828]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (06-Oct-2025 21:31:03.418) (total time: 10852ms): Oct 06 21:31:14 crc kubenswrapper[5014]: Trace[950299828]: ---"Objects listed" error: 10852ms (21:31:14.271) Oct 06 21:31:14 crc kubenswrapper[5014]: Trace[950299828]: [10.852748162s] [10.852748162s] END Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.271722 5014 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.272337 5014 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.299662 5014 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.405032 5014 apiserver.go:52] "Watching apiserver" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.414265 5014 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.414989 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c"] Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.416037 5014 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.416124 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.416202 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.416497 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.416544 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.416595 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.416652 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.416712 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.417140 5014 util.go:30] "No sandbox for pod can be found. 
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.419581 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.419869 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.420113 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.421296 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.421315 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.421484 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.421611 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.421678 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.421784 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.451940 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.469970 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.484724 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.494838 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.508449 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.511887 5014 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.541272 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.556028 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.567572 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.576306 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.585801 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.595786 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602148 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602201 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602223 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602241 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602258 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602276 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602294 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602309 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
\"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602324 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602339 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602353 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602369 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602409 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602424 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602438 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602454 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602469 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602486 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602499 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602593 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602760 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602795 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602784 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602824 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602893 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602921 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). 
InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.602981 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.603029 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.603180 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.603363 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.603373 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.603394 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.603475 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.603523 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.603569 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.603754 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.603775 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.603813 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.603801 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.603831 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.603863 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.603995 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.604031 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.604219 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.604225 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.604300 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.604407 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.604433 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.604448 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.604493 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.604510 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.604528 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605060 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.604963 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.604964 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605014 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605034 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605057 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605079 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605170 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605201 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605224 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605231 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605246 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605320 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605344 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605442 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605503 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605638 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605783 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.605886 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.606121 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.606226 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.606250 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.606506 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.606559 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.606613 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.606879 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.607062 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.607119 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.607139 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.607156 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.607510 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.607794 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 06 21:31:14 crc 
kubenswrapper[5014]: I1006 21:31:14.607480 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.607523 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.607746 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.607874 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.607901 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.607942 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.607958 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.607973 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608279 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608301 5014 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608040 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608209 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608231 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608252 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608544 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608672 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608546 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608726 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608740 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608771 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608792 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608807 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608867 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608903 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608935 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.608969 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Oct 06 21:31:14 crc 
kubenswrapper[5014]: I1006 21:31:14.609028 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609064 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609099 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609129 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609145 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609264 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609322 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609353 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609159 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609493 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609543 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609530 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609597 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609708 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609719 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609727 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.609685 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.609786 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:31:15.109765452 +0000 UTC m=+20.402802186 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610052 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610117 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610168 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610317 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610348 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610365 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610381 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610549 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610577 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610575 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610612 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610653 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610670 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610686 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610703 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610746 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610762 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610776 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610791 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610806 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610821 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610834 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610849 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610864 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610868 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610878 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610895 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610911 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610925 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610938 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610951 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610965 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610979 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.610994 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611007 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod 
\"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611022 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611036 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611051 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611065 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611078 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611093 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611106 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611125 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611139 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611154 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611168 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611182 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611215 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611229 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611243 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611259 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611273 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611287 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611301 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611315 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod 
\"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611343 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611357 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611370 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611384 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611397 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611412 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611427 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611443 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611456 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611471 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611486 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611502 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611518 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611532 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611555 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611570 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611584 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611598 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611612 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611653 5014 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611668 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611687 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611701 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611716 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611731 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611748 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611763 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611776 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611792 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Oct 06 21:31:14 crc 
kubenswrapper[5014]: I1006 21:31:14.611806 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611823 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611837 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611852 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611866 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611879 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611894 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611909 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611924 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611938 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611954 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611969 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611983 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.611998 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612013 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612027 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612042 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612056 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612071 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612085 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612100 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612114 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612129 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612144 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612159 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612174 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612188 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612202 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612217 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612232 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612247 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612262 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612277 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612297 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612312 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612327 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612342 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612356 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612372 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612389 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612407 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612426 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612440 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612456 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612488 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612503 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612519 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612534 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612550 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612566 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612581 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612645 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612673 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612690 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612705 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612721 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612736 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612753 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612772 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612790 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612805 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612822 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612846 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612865 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612881 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612932 5014 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612942 5014 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612952 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612961 5014 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612970 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612979 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612989 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.612998 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613006 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613015 5014 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613024 5014 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613033 5014 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613042 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613051 5014 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613061 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613071 5014 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613080 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613089 5014 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613098 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613107 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613117 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613126 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613139 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613148 5014 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613157 5014 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613165 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613175 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613184 5014 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613192 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613200 5014 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613210 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613219 5014 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613228 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613236 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613246 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613254 5014 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613263 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613272 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613280 5014 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613289 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613297 5014 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613307 5014 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613318 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613328 5014 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613337 5014 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613345 5014 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613354 5014 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613363 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613372 5014 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613380 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613389 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613398 5014 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613407 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613416 5014 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613426 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613435 5014 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613444 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613453 5014 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613461 5014 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613471 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613480 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613496 5014 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613505 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613514 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613523 5014 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613531 5014 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.613990 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.614191 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.614371 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.614552 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.623549 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.623919 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.624376 5014 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875" exitCode=255
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.624412 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875"}
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.624503 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.624756 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.625014 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.625035 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.625334 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.625380 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.625552 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.625754 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.625943 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.626133 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.626225 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.626386 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.626725 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.626756 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.627010 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.627048 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.627251 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.627270 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.627469 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.627683 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.627989 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.628272 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.628403 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.628652 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.628710 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.628717 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.628854 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.629017 5014 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.629099 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:15.129075504 +0000 UTC m=+20.422112278 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.629164 5014 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.629233 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:15.129212329 +0000 UTC m=+20.422249193 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.629400 5014 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.630417 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.629579 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.629759 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.630096 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.630143 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.631096 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.631229 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.631436 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.631471 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.631530 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.632050 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.632061 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.632404 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.632411 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.632477 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.632490 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.632733 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.632875 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.633120 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.633129 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.633144 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.633463 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc".
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.633531 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.633892 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.634051 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.634462 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.634506 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.634533 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.634124 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.634916 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.635137 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.635426 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.635453 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.635646 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.636439 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.637058 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.637095 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.637165 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.637301 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.637316 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.637606 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.637930 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.638128 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.638191 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.638260 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.638428 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.638687 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.638827 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.639034 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.639114 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.639271 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.639286 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.639376 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.640452 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.640833 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.640918 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.640950 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.640998 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.641193 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.641195 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). 
InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.641260 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.637944 5014 scope.go:117] "RemoveContainer" containerID="a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.640077 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.652255 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.654294 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.652709 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.654344 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.652763 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.653116 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.653112 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.653152 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.653202 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.653454 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.653488 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.653813 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.654273 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.654493 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.654507 5014 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.654562 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:15.154546376 +0000 UTC m=+20.447583110 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.654578 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.654813 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.654849 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.654879 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.654903 5014 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 06 21:31:14 crc kubenswrapper[5014]: E1006 21:31:14.655002 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:15.154942329 +0000 UTC m=+20.447979073 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.655300 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.655361 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.655383 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.654846 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.655458 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.655518 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.655654 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.656304 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.656782 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.656805 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.657839 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.658955 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.660703 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.660738 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.660840 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.661346 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.661473 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.661604 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.661843 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.661930 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.662106 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.662490 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.663773 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.664360 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.672991 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.673824 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.673841 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.683464 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.685428 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.687652 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.698085 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.699939 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.711351 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714395 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714447 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714496 5014 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714507 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714516 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714526 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714537 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714545 5014 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714553 5014 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714561 5014 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714569 5014 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714577 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714585 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714593 5014 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714602 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714609 5014 reconciler_common.go:293] "Volume detached for volume 
\"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714665 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714659 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714675 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714719 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714744 5014 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714764 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714778 5014 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714791 5014 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714802 5014 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714817 5014 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714828 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714839 5014 reconciler_common.go:293] "Volume detached for volume 
\"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714851 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714863 5014 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714875 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714886 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714897 5014 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714907 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714919 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714931 5014 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714942 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714953 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714976 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.714990 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715002 5014 reconciler_common.go:293] "Volume detached for volume 
\"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715015 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715034 5014 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715045 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715057 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715069 5014 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715079 5014 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715090 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715100 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715112 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715123 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715135 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715146 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715157 5014 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" 
(UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715168 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715179 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715189 5014 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715200 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715212 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715223 5014 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715234 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715245 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715255 5014 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715266 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715277 5014 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715287 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715298 5014 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node 
\"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715309 5014 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715320 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715331 5014 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715341 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715353 5014 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715364 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715374 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715384 5014 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715402 5014 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715414 5014 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715425 5014 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715435 5014 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715446 5014 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 
21:31:14.715457 5014 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715467 5014 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715477 5014 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715489 5014 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715501 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715513 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715525 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715536 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715547 5014 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715558 5014 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715568 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715578 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715588 5014 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc 
kubenswrapper[5014]: I1006 21:31:14.715601 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715629 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715641 5014 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715652 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715662 5014 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715673 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715685 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715697 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715708 5014 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715718 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715729 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715740 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715751 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: 
\"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715763 5014 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715774 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715785 5014 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715796 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715807 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715818 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715828 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715840 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715851 5014 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715862 5014 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715873 5014 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715883 5014 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715894 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: 
\"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715905 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715916 5014 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715927 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715937 5014 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715948 5014 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715959 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715970 5014 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715985 5014 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.715995 5014 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.716006 5014 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.716017 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.716028 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.716039 5014 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.720010 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.733288 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.743984 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 06 21:31:14 crc kubenswrapper[5014]: I1006 21:31:14.755447 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 06 21:31:14 crc kubenswrapper[5014]: W1006 21:31:14.771128 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-373f176a3ebe4118ca8c8f5276147fec480cfed7a5a5a7bb25a4140db19a936d WatchSource:0}: Error finding container 373f176a3ebe4118ca8c8f5276147fec480cfed7a5a5a7bb25a4140db19a936d: Status 404 returned error can't find the container with id 373f176a3ebe4118ca8c8f5276147fec480cfed7a5a5a7bb25a4140db19a936d Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.118422 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:31:15 crc kubenswrapper[5014]: E1006 21:31:15.118566 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:31:16.118538759 +0000 UTC m=+21.411575493 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.147416 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.157446 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.167988 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.179486 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.189977 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.205303 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06
T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.216151 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.219017 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.219068 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.219095 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.219118 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:15 crc kubenswrapper[5014]: E1006 21:31:15.219186 5014 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 06 21:31:15 crc kubenswrapper[5014]: E1006 
21:31:15.219227 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 06 21:31:15 crc kubenswrapper[5014]: E1006 21:31:15.219244 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:16.219228275 +0000 UTC m=+21.512265009 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 06 21:31:15 crc kubenswrapper[5014]: E1006 21:31:15.219244 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 06 21:31:15 crc kubenswrapper[5014]: E1006 21:31:15.219186 5014 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 06 21:31:15 crc kubenswrapper[5014]: E1006 21:31:15.219261 5014 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:15 crc kubenswrapper[5014]: E1006 21:31:15.219194 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 06 21:31:15 crc kubenswrapper[5014]: E1006 21:31:15.219297 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 06 21:31:15 crc kubenswrapper[5014]: E1006 21:31:15.219312 5014 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:15 crc kubenswrapper[5014]: E1006 21:31:15.219279 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:16.219271597 +0000 UTC m=+21.512308331 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 06 21:31:15 crc kubenswrapper[5014]: E1006 21:31:15.219358 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:16.219347159 +0000 UTC m=+21.512383893 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:15 crc kubenswrapper[5014]: E1006 21:31:15.219375 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:16.21936656 +0000 UTC m=+21.512403404 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.228848 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.488390 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.489253 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.490368 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.491282 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.492033 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.492734 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.495046 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.495981 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.496570 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.497172 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.497848 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" 
path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.498758 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.499345 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.499876 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.500359 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.500872 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.501417 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.503631 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.504258 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.505043 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.506085 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.506194 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.506912 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.507869 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.508491 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.508896 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.509892 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.510944 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.511398 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.512036 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" 
path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.513059 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.513534 5014 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.513652 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.515661 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.516121 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.516541 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.518140 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.519270 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.519761 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.520848 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.521481 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.522488 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.523223 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.524544 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" 
path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.524520 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.526016 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.526838 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.527994 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.529085 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.530864 5014 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.531571 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.532407 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.533465 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.534354 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.535450 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.536041 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.539181 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.557214 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.609396 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06
T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.627247 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"373f176a3ebe4118ca8c8f5276147fec480cfed7a5a5a7bb25a4140db19a936d"} Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.629127 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03"} Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.629159 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f"} Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 
21:31:15.629170 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"8153dfeee3807516e15e513faf89cb7c17be4f0c7fb7881a8d334fc2b1a65d30"} Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.629954 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-rmjtc"] Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.630306 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-rmjtc" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.630824 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173"} Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.630851 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"de37dc51482be2197a6d2d23a0a08ac9706db5c4738e0093bd5c53bccb1d077e"} Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.632600 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.633136 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.635191 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f"} Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.635449 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.646224 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.647885 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.648217 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.657867 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.680011 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.681478 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.712057 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.723047 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cn8pk\" (UniqueName: \"kubernetes.io/projected/c53c82a2-7b51-49c1-88f4-fdb5df783712-kube-api-access-cn8pk\") pod \"node-resolver-rmjtc\" (UID: \"c53c82a2-7b51-49c1-88f4-fdb5df783712\") " pod="openshift-dns/node-resolver-rmjtc" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.723108 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/c53c82a2-7b51-49c1-88f4-fdb5df783712-hosts-file\") pod \"node-resolver-rmjtc\" (UID: \"c53c82a2-7b51-49c1-88f4-fdb5df783712\") " pod="openshift-dns/node-resolver-rmjtc" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.734078 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.749717 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.763035 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.788520 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.791126 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.803441 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:15Z is after 2025-08-24T17:21:41Z" Oct 06 
21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.819098 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.824132 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/c53c82a2-7b51-49c1-88f4-fdb5df783712-hosts-file\") pod \"node-resolver-rmjtc\" (UID: \"c53c82a2-7b51-49c1-88f4-fdb5df783712\") " pod="openshift-dns/node-resolver-rmjtc" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.824320 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cn8pk\" (UniqueName: \"kubernetes.io/projected/c53c82a2-7b51-49c1-88f4-fdb5df783712-kube-api-access-cn8pk\") pod \"node-resolver-rmjtc\" (UID: \"c53c82a2-7b51-49c1-88f4-fdb5df783712\") " pod="openshift-dns/node-resolver-rmjtc" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.824318 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/c53c82a2-7b51-49c1-88f4-fdb5df783712-hosts-file\") pod \"node-resolver-rmjtc\" (UID: \"c53c82a2-7b51-49c1-88f4-fdb5df783712\") " pod="openshift-dns/node-resolver-rmjtc" Oct 06 21:31:15 crc kubenswrapper[5014]: 
I1006 21:31:15.833604 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.843430 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cn8pk\" (UniqueName: \"kubernetes.io/projected/c53c82a2-7b51-49c1-88f4-fdb5df783712-kube-api-access-cn8pk\") pod \"node-resolver-rmjtc\" (UID: \"c53c82a2-7b51-49c1-88f4-fdb5df783712\") " pod="openshift-dns/node-resolver-rmjtc" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.849181 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:15 crc kubenswrapper[5014]: I1006 21:31:15.942857 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-rmjtc" Oct 06 21:31:15 crc kubenswrapper[5014]: W1006 21:31:15.956515 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc53c82a2_7b51_49c1_88f4_fdb5df783712.slice/crio-5e6e87662d7a38782ed23cb806fdfa97cc0365ca1d14c00ace1b08b6218d8a27 WatchSource:0}: Error finding container 5e6e87662d7a38782ed23cb806fdfa97cc0365ca1d14c00ace1b08b6218d8a27: Status 404 returned error can't find the container with id 5e6e87662d7a38782ed23cb806fdfa97cc0365ca1d14c00ace1b08b6218d8a27 Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.054505 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-6bths"] Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.054972 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-8ddbf"] Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.054996 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.055360 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.055424 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-gqd7v"] Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.056328 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.056659 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.057127 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.058389 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-2wj75"] Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.059163 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.059271 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.059472 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.059806 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.059939 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.060036 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.062323 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.062449 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.062524 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.062650 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.062489 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.064875 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.066928 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.067048 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.066928 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.067275 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.067395 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.067433 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.082169 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.098263 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.109333 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.120147 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.126780 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.126990 5014 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:31:18.126965488 +0000 UTC m=+23.420002222 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.135531 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.148880 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.158024 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.172146 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.191445 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.203745 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.216550 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.226917 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227394 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-etc-kubernetes\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227440 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-log-socket\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227463 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-system-cni-dir\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227484 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-run-k8s-cni-cncf-io\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227538 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-run-netns\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227596 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcbj4\" (UniqueName: \"kubernetes.io/projected/5d2de4ac-a423-4f5a-904a-817553f204f6-kube-api-access-gcbj4\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227644 5014 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-cnibin\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227667 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-os-release\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227691 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-run-multus-certs\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227712 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-var-lib-openvswitch\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227734 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227759 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-multus-conf-dir\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227779 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f5c94fe2-2a02-4515-bc83-234827e59e4f-cni-binary-copy\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227798 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/f5c94fe2-2a02-4515-bc83-234827e59e4f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227818 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-cni-netd\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227838 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-var-lib-cni-multus\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227857 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89vs2\" (UniqueName: \"kubernetes.io/projected/9f1464a5-d713-4f79-8248-33c69abcdac2-kube-api-access-89vs2\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.227944 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f5c94fe2-2a02-4515-bc83-234827e59e4f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228074 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-multus-cni-dir\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228144 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p22wl\" (UniqueName: \"kubernetes.io/projected/f5c94fe2-2a02-4515-bc83-234827e59e4f-kube-api-access-p22wl\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228186 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228217 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-kubelet\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228242 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-run-netns\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228267 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-slash\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228288 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-etc-openvswitch\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.228306 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.228329 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228327 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-ovn\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.228344 5014 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228359 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-cni-bin\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.228391 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:18.228374817 +0000 UTC m=+23.521411631 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228413 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-env-overrides\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228437 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f5c94fe2-2a02-4515-bc83-234827e59e4f-system-cni-dir\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228457 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/33478e0f-9143-4e11-96a1-04c53f0f6277-rootfs\") pod \"machine-config-daemon-6bths\" (UID: \"33478e0f-9143-4e11-96a1-04c53f0f6277\") " pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228487 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228513 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-node-log\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228565 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228596 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-multus-socket-dir-parent\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228635 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-var-lib-kubelet\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228658 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/33478e0f-9143-4e11-96a1-04c53f0f6277-proxy-tls\") pod \"machine-config-daemon-6bths\" (UID: \"33478e0f-9143-4e11-96a1-04c53f0f6277\") " pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228686 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-ovnkube-config\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228708 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-ovnkube-script-lib\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228734 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.228734 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.228786 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.228787 5014 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.228806 5014 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228758 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f5c94fe2-2a02-4515-bc83-234827e59e4f-cnibin\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.228849 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf 
podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:18.228830763 +0000 UTC m=+23.521867507 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228865 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qn2z\" (UniqueName: \"kubernetes.io/projected/33478e0f-9143-4e11-96a1-04c53f0f6277-kube-api-access-9qn2z\") pod \"machine-config-daemon-6bths\" (UID: \"33478e0f-9143-4e11-96a1-04c53f0f6277\") " pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.228884 5014 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.228885 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:18.228861664 +0000 UTC m=+23.521898398 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228933 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-systemd-units\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.228960 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:18.228949236 +0000 UTC m=+23.521985980 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.228992 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-systemd\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.229017 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5d2de4ac-a423-4f5a-904a-817553f204f6-ovn-node-metrics-cert\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.229042 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9f1464a5-d713-4f79-8248-33c69abcdac2-cni-binary-copy\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.229061 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-var-lib-cni-bin\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.229083 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f5c94fe2-2a02-4515-bc83-234827e59e4f-os-release\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.229104 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/33478e0f-9143-4e11-96a1-04c53f0f6277-mcd-auth-proxy-config\") pod \"machine-config-daemon-6bths\" (UID: \"33478e0f-9143-4e11-96a1-04c53f0f6277\") " pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.229124 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-openvswitch\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.229156 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-hostroot\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") 
" pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.229178 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/9f1464a5-d713-4f79-8248-33c69abcdac2-multus-daemon-config\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.229198 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-run-ovn-kubernetes\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.243071 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identit
y-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.275224 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.292894 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.318013 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330294 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p22wl\" (UniqueName: \"kubernetes.io/projected/f5c94fe2-2a02-4515-bc83-234827e59e4f-kube-api-access-p22wl\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330325 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-multus-cni-dir\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330341 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-run-netns\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330362 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-kubelet\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330377 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-etc-openvswitch\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330392 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-ovn\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330406 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-cni-bin\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330420 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-env-overrides\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330435 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-slash\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330451 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f5c94fe2-2a02-4515-bc83-234827e59e4f-system-cni-dir\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330468 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/33478e0f-9143-4e11-96a1-04c53f0f6277-rootfs\") pod 
\"machine-config-daemon-6bths\" (UID: \"33478e0f-9143-4e11-96a1-04c53f0f6277\") " pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330483 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-node-log\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330503 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/33478e0f-9143-4e11-96a1-04c53f0f6277-proxy-tls\") pod \"machine-config-daemon-6bths\" (UID: \"33478e0f-9143-4e11-96a1-04c53f0f6277\") " pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330517 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-ovnkube-config\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330531 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-ovnkube-script-lib\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330552 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-multus-socket-dir-parent\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330565 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-var-lib-kubelet\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330580 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qn2z\" (UniqueName: \"kubernetes.io/projected/33478e0f-9143-4e11-96a1-04c53f0f6277-kube-api-access-9qn2z\") pod \"machine-config-daemon-6bths\" (UID: \"33478e0f-9143-4e11-96a1-04c53f0f6277\") " pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330594 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-systemd-units\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330609 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-systemd\") pod \"ovnkube-node-2wj75\" (UID: 
\"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330644 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5d2de4ac-a423-4f5a-904a-817553f204f6-ovn-node-metrics-cert\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330666 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f5c94fe2-2a02-4515-bc83-234827e59e4f-cnibin\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330681 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-var-lib-cni-bin\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330696 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f5c94fe2-2a02-4515-bc83-234827e59e4f-os-release\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330711 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/33478e0f-9143-4e11-96a1-04c53f0f6277-mcd-auth-proxy-config\") pod \"machine-config-daemon-6bths\" (UID: \"33478e0f-9143-4e11-96a1-04c53f0f6277\") " pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330725 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-openvswitch\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330741 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9f1464a5-d713-4f79-8248-33c69abcdac2-cni-binary-copy\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330757 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/9f1464a5-d713-4f79-8248-33c69abcdac2-multus-daemon-config\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330773 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-run-ovn-kubernetes\") pod \"ovnkube-node-2wj75\" (UID: 
\"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330788 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-hostroot\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330803 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-etc-kubernetes\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330817 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-system-cni-dir\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330833 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-run-k8s-cni-cncf-io\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330848 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-run-netns\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330862 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-log-socket\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330876 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcbj4\" (UniqueName: \"kubernetes.io/projected/5d2de4ac-a423-4f5a-904a-817553f204f6-kube-api-access-gcbj4\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330891 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-var-lib-openvswitch\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330905 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc 
kubenswrapper[5014]: I1006 21:31:16.330920 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-cnibin\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330933 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-os-release\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330948 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-run-multus-certs\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330962 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/f5c94fe2-2a02-4515-bc83-234827e59e4f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330977 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-multus-conf-dir\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.330991 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f5c94fe2-2a02-4515-bc83-234827e59e4f-cni-binary-copy\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.331006 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-cni-netd\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.331020 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f5c94fe2-2a02-4515-bc83-234827e59e4f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.331034 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-var-lib-cni-multus\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.331049 5014 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-89vs2\" (UniqueName: \"kubernetes.io/projected/9f1464a5-d713-4f79-8248-33c69abcdac2-kube-api-access-89vs2\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.331468 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-multus-cni-dir\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.331500 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-run-netns\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.331520 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-kubelet\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.331538 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-etc-openvswitch\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.331556 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-ovn\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.331573 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-cni-bin\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.332027 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-env-overrides\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.332061 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-slash\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.332081 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f5c94fe2-2a02-4515-bc83-234827e59e4f-system-cni-dir\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: 
\"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.332100 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/33478e0f-9143-4e11-96a1-04c53f0f6277-rootfs\") pod \"machine-config-daemon-6bths\" (UID: \"33478e0f-9143-4e11-96a1-04c53f0f6277\") " pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.332120 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-node-log\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.332861 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-hostroot\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.332982 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-cnibin\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333059 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-etc-kubernetes\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333101 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-system-cni-dir\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333128 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-run-k8s-cni-cncf-io\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333153 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-run-netns\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333177 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-log-socket\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333285 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-ovnkube-config\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333449 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-var-lib-openvswitch\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333486 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333512 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-multus-conf-dir\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333714 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-ovnkube-script-lib\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333724 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-os-release\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333752 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-run-multus-certs\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333768 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-multus-socket-dir-parent\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333791 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-var-lib-kubelet\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333920 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-systemd-units\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.333947 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-systemd\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.334340 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/f5c94fe2-2a02-4515-bc83-234827e59e4f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.334853 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/33478e0f-9143-4e11-96a1-04c53f0f6277-mcd-auth-proxy-config\") pod \"machine-config-daemon-6bths\" (UID: \"33478e0f-9143-4e11-96a1-04c53f0f6277\") " pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.334894 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f5c94fe2-2a02-4515-bc83-234827e59e4f-cnibin\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.334923 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-var-lib-cni-bin\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.334965 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f5c94fe2-2a02-4515-bc83-234827e59e4f-os-release\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.335409 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/33478e0f-9143-4e11-96a1-04c53f0f6277-proxy-tls\") pod \"machine-config-daemon-6bths\" (UID: \"33478e0f-9143-4e11-96a1-04c53f0f6277\") " pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.335464 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9f1464a5-d713-4f79-8248-33c69abcdac2-cni-binary-copy\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.335502 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-openvswitch\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc 
kubenswrapper[5014]: I1006 21:31:16.335738 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-run-ovn-kubernetes\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.335791 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-cni-netd\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.335822 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/9f1464a5-d713-4f79-8248-33c69abcdac2-host-var-lib-cni-multus\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.335881 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f5c94fe2-2a02-4515-bc83-234827e59e4f-cni-binary-copy\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.335950 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/9f1464a5-d713-4f79-8248-33c69abcdac2-multus-daemon-config\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.336169 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5d2de4ac-a423-4f5a-904a-817553f204f6-ovn-node-metrics-cert\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.336320 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f5c94fe2-2a02-4515-bc83-234827e59e4f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.347721 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa9308
9f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.349914 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89vs2\" (UniqueName: \"kubernetes.io/projected/9f1464a5-d713-4f79-8248-33c69abcdac2-kube-api-access-89vs2\") pod \"multus-8ddbf\" (UID: \"9f1464a5-d713-4f79-8248-33c69abcdac2\") " pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.350480 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p22wl\" (UniqueName: \"kubernetes.io/projected/f5c94fe2-2a02-4515-bc83-234827e59e4f-kube-api-access-p22wl\") pod \"multus-additional-cni-plugins-gqd7v\" (UID: \"f5c94fe2-2a02-4515-bc83-234827e59e4f\") " pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.351212 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qn2z\" (UniqueName: \"kubernetes.io/projected/33478e0f-9143-4e11-96a1-04c53f0f6277-kube-api-access-9qn2z\") pod \"machine-config-daemon-6bths\" (UID: \"33478e0f-9143-4e11-96a1-04c53f0f6277\") " pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.352044 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcbj4\" (UniqueName: \"kubernetes.io/projected/5d2de4ac-a423-4f5a-904a-817553f204f6-kube-api-access-gcbj4\") pod \"ovnkube-node-2wj75\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.359024 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.368941 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.375520 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.385034 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-8ddbf" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.395129 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c68774
41ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"
imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: W1006 21:31:16.405435 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9f1464a5_d713_4f79_8248_33c69abcdac2.slice/crio-4f7d816a2b2500f8094da949ce41902ac7d974639e3dccccf09819353f2f7e80 WatchSource:0}: Error finding container 4f7d816a2b2500f8094da949ce41902ac7d974639e3dccccf09819353f2f7e80: Status 404 returned error can't find the container with id 
4f7d816a2b2500f8094da949ce41902ac7d974639e3dccccf09819353f2f7e80 Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.407809 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.420994 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.437516 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set 
denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.447733 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.473474 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.478211 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.483647 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.483681 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.483647 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.483775 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.483826 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:16 crc kubenswrapper[5014]: E1006 21:31:16.483873 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:16 crc kubenswrapper[5014]: W1006 21:31:16.495789 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5c94fe2_2a02_4515_bc83_234827e59e4f.slice/crio-090fa4eba1592dceb56304c455baccf92878a3321b198d71b1f0eb89f41b06b4 WatchSource:0}: Error finding container 090fa4eba1592dceb56304c455baccf92878a3321b198d71b1f0eb89f41b06b4: Status 404 returned error can't find the container with id 090fa4eba1592dceb56304c455baccf92878a3321b198d71b1f0eb89f41b06b4 Oct 06 21:31:16 crc kubenswrapper[5014]: W1006 21:31:16.502347 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5d2de4ac_a423_4f5a_904a_817553f204f6.slice/crio-cbb0bac5c2e6aaec1523e691ea7627fc73d4a57d2fcce6fc9f8a39726825a775 WatchSource:0}: Error finding container cbb0bac5c2e6aaec1523e691ea7627fc73d4a57d2fcce6fc9f8a39726825a775: Status 404 returned error can't find the container with id cbb0bac5c2e6aaec1523e691ea7627fc73d4a57d2fcce6fc9f8a39726825a775 Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.643842 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerStarted","Data":"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114"} Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.643896 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerStarted","Data":"cbb0bac5c2e6aaec1523e691ea7627fc73d4a57d2fcce6fc9f8a39726825a775"} Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.645797 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-rmjtc" event={"ID":"c53c82a2-7b51-49c1-88f4-fdb5df783712","Type":"ContainerStarted","Data":"80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a"} Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.645833 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-rmjtc" event={"ID":"c53c82a2-7b51-49c1-88f4-fdb5df783712","Type":"ContainerStarted","Data":"5e6e87662d7a38782ed23cb806fdfa97cc0365ca1d14c00ace1b08b6218d8a27"} Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.652065 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c"} Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.652138 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"0ca858b93956ee5466c0a178654e00aba33bcd6d369ce826ef331205484900cb"} Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.656394 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" event={"ID":"f5c94fe2-2a02-4515-bc83-234827e59e4f","Type":"ContainerStarted","Data":"090fa4eba1592dceb56304c455baccf92878a3321b198d71b1f0eb89f41b06b4"} Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.658911 5014 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8ddbf" event={"ID":"9f1464a5-d713-4f79-8248-33c69abcdac2","Type":"ContainerStarted","Data":"7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4"} Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.658952 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8ddbf" event={"ID":"9f1464a5-d713-4f79-8248-33c69abcdac2","Type":"ContainerStarted","Data":"4f7d816a2b2500f8094da949ce41902ac7d974639e3dccccf09819353f2f7e80"} Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.674256 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/opens
hift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6
a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.686919 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.702989 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.715530 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set 
denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.727211 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.737793 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.751134 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.778933 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20994829
19d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.804256 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.824172 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.837348 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.858862 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.871612 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.887577 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.907251 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.924983 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.936260 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.954433 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.974888 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:16 crc kubenswrapper[5014]: I1006 21:31:16.989886 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired 
or is not yet valid: current time 2025-10-06T21:31:16Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.007103 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.031220 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.066203 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.083941 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.113791 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set 
denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.150443 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.663917 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" 
event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306"} Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.667845 5014 generic.go:334] "Generic (PLEG): container finished" podID="f5c94fe2-2a02-4515-bc83-234827e59e4f" containerID="ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c" exitCode=0 Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.667900 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" event={"ID":"f5c94fe2-2a02-4515-bc83-234827e59e4f","Type":"ContainerDied","Data":"ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c"} Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.681075 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3"} Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.686118 5014 generic.go:334] "Generic (PLEG): container finished" podID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerID="7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114" exitCode=0 Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.686206 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerDied","Data":"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114"} Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.687732 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.704990 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.727605 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.738291 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.754363 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.768293 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.780394 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired 
or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.795416 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.815262 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.827146 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.838724 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.850365 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.859327 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.870420 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.879186 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.889815 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.902240 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host
/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.913304 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.938782 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.953453 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.965101 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.979661 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:17 crc kubenswrapper[5014]: I1006 21:31:17.991487 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:17Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.016135 5014 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.028057 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.041227 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.148066 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.148257 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:31:22.148221653 +0000 UTC m=+27.441258417 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.248690 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.248733 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.248762 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.248783 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: 
\"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.248888 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.248903 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.248906 5014 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.248936 5014 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.248978 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:22.248964749 +0000 UTC m=+27.542001483 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.249001 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:22.24898496 +0000 UTC m=+27.542021684 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.248913 5014 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.249052 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:22.249047063 +0000 UTC m=+27.542083797 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.249140 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.249153 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.249161 5014 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.249201 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:22.249178367 +0000 UTC m=+27.542215101 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.452991 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.459657 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.468510 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.483556 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.483585 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.483663 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.483690 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.483819 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.483881 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.485832 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z 
is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.503226 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.518697 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.531671 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.542089 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.556427 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.569990 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host
/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.587830 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.659517 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.683179 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.689873 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" event={"ID":"f5c94fe2-2a02-4515-bc83-234827e59e4f","Type":"ContainerStarted","Data":"0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168"} Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.699050 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerStarted","Data":"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055"} Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.699077 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerStarted","Data":"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3"} Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.699095 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerStarted","Data":"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b"} Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.699104 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerStarted","Data":"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0"} Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.699114 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerStarted","Data":"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3"} Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.699123 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerStarted","Data":"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b"} Oct 06 21:31:18 crc kubenswrapper[5014]: E1006 21:31:18.704142 5014 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.705814 5014 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.717659 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set 
denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.727524 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.744310 5014 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.755367 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.767698 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.778565 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.788145 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.801385 5014 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.813203 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.831751 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.845575 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.859100 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedA
t\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.903221 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.938293 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:18 crc kubenswrapper[5014]: I1006 21:31:18.976870 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-10-06T21:31:18Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.023333 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:19Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.704818 5014 generic.go:334] "Generic (PLEG): container finished" podID="f5c94fe2-2a02-4515-bc83-234827e59e4f" containerID="0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168" exitCode=0 Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.704912 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" event={"ID":"f5c94fe2-2a02-4515-bc83-234827e59e4f","Type":"ContainerDied","Data":"0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168"} Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.733612 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:19Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.752911 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:19Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.792507 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:19Z 
is after 2025-08-24T17:21:41Z" Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.806289 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/ser
viceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\
"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:19Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.821682 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:19Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.835011 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:19Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.847290 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:19Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.861305 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:19Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.876225 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:19Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.905508 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:19Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.918291 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:19Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.934988 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:19Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.955810 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:19Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:19 crc kubenswrapper[5014]: I1006 21:31:19.969861 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:19Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.483832 5014 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.483948 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:20 crc kubenswrapper[5014]: E1006 21:31:20.484041 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:20 crc kubenswrapper[5014]: E1006 21:31:20.484219 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.484357 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:20 crc kubenswrapper[5014]: E1006 21:31:20.484482 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.672571 5014 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.675383 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.675655 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.675821 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.676042 5014 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.686567 5014 kubelet_node_status.go:115] "Node was previously registered" node="crc" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.687019 5014 kubelet_node_status.go:79] "Successfully registered node" node="crc" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.688439 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.688481 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.688498 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.688522 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.688539 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:20Z","lastTransitionTime":"2025-10-06T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.712107 5014 generic.go:334] "Generic (PLEG): container finished" podID="f5c94fe2-2a02-4515-bc83-234827e59e4f" containerID="41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694" exitCode=0 Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.712227 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" event={"ID":"f5c94fe2-2a02-4515-bc83-234827e59e4f","Type":"ContainerDied","Data":"41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694"} Oct 06 21:31:20 crc kubenswrapper[5014]: E1006 21:31:20.713086 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.725921 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.726030 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.726051 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.726088 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.726107 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:20Z","lastTransitionTime":"2025-10-06T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:20 crc kubenswrapper[5014]: E1006 21:31:20.752603 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.754298 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.759410 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.759463 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.759477 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.759498 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.759511 5014 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:20Z","lastTransitionTime":"2025-10-06T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.768422 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: E1006 21:31:20.779997 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.785297 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.785977 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.786022 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.786037 5014 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.786059 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.786074 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:20Z","lastTransitionTime":"2025-10-06T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:20 crc kubenswrapper[5014]: E1006 21:31:20.799869 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.804147 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.804198 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.804211 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.804231 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.804242 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:20Z","lastTransitionTime":"2025-10-06T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.810652 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z 
is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: E1006 21:31:20.820487 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: E1006 21:31:20.820710 5014 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.824187 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.824225 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.824240 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.824260 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.824332 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:20Z","lastTransitionTime":"2025-10-06T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.825966 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.841362 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.858538 5014 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.870171 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.883241 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.900949 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\
":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary
-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.915053 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.925721 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.927343 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.927371 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.927383 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.927399 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.927411 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:20Z","lastTransitionTime":"2025-10-06T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.949563 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"ima
geID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:20 crc kubenswrapper[5014]: I1006 21:31:20.963200 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:20Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.029368 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.029422 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.029436 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.029457 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.029471 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:21Z","lastTransitionTime":"2025-10-06T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.131914 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.131953 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.131969 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.131986 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.131997 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:21Z","lastTransitionTime":"2025-10-06T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.235666 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.235736 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.235754 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.235781 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.235800 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:21Z","lastTransitionTime":"2025-10-06T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.338401 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.338454 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.338469 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.338513 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.338531 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:21Z","lastTransitionTime":"2025-10-06T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.441069 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.441101 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.441112 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.441143 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.441154 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:21Z","lastTransitionTime":"2025-10-06T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.544228 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.544298 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.544315 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.544339 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.544356 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:21Z","lastTransitionTime":"2025-10-06T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.648528 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.648591 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.648615 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.648684 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.648709 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:21Z","lastTransitionTime":"2025-10-06T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.721580 5014 generic.go:334] "Generic (PLEG): container finished" podID="f5c94fe2-2a02-4515-bc83-234827e59e4f" containerID="4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4" exitCode=0 Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.721693 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" event={"ID":"f5c94fe2-2a02-4515-bc83-234827e59e4f","Type":"ContainerDied","Data":"4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4"} Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.731014 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerStarted","Data":"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e"} Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.740893 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:21Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.752512 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.752646 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.752667 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.752697 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.752719 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:21Z","lastTransitionTime":"2025-10-06T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.757332 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:21Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.794145 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:21Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.817912 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de25971
26bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:21Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.835536 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:21Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.850876 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:21Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.857074 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.857135 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.857155 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.857183 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.857203 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:21Z","lastTransitionTime":"2025-10-06T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.870168 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:21Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.887699 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:21Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.904329 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},
{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:21Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.928181 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:21Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.945080 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:21Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.958962 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:21Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.963381 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.963444 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.963456 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.963473 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.963484 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:21Z","lastTransitionTime":"2025-10-06T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.974865 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set 
denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:21Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:21 crc kubenswrapper[5014]: I1006 21:31:21.988415 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:21Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.066303 5014 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.066362 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.066379 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.066404 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.066420 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:22Z","lastTransitionTime":"2025-10-06T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.091443 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-r8bdz"] Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.091901 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-r8bdz" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.096163 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.096553 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.096784 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.096897 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.099229 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/297c0e39-f2c1-4708-a2ba-cb9576086924-host\") pod \"node-ca-r8bdz\" (UID: \"297c0e39-f2c1-4708-a2ba-cb9576086924\") " pod="openshift-image-registry/node-ca-r8bdz" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.099543 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/297c0e39-f2c1-4708-a2ba-cb9576086924-serviceca\") pod \"node-ca-r8bdz\" (UID: \"297c0e39-f2c1-4708-a2ba-cb9576086924\") " pod="openshift-image-registry/node-ca-r8bdz" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.099703 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d55hg\" (UniqueName: \"kubernetes.io/projected/297c0e39-f2c1-4708-a2ba-cb9576086924-kube-api-access-d55hg\") pod \"node-ca-r8bdz\" (UID: \"297c0e39-f2c1-4708-a2ba-cb9576086924\") " pod="openshift-image-registry/node-ca-r8bdz" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.117898 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.136089 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.171857 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.171905 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.171914 5014 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.171930 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.171940 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:22Z","lastTransitionTime":"2025-10-06T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.176637 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z 
is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.194797 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.200508 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.200710 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/297c0e39-f2c1-4708-a2ba-cb9576086924-host\") pod \"node-ca-r8bdz\" (UID: \"297c0e39-f2c1-4708-a2ba-cb9576086924\") " pod="openshift-image-registry/node-ca-r8bdz" Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.200760 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:31:30.200727259 +0000 UTC m=+35.493764043 (durationBeforeRetry 8s). 
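The unmount above is rescheduled with durationBeforeRetry 8s, and the Error line that follows gives the cause: the kubevirt.io.hostpath-provisioner CSI driver has not yet re-registered with the restarted kubelet. The 8s delay comes from the kubelet's per-operation exponential backoff. A minimal Go sketch of that policy, assuming an illustrative 500ms initial delay, a doubling factor, and a 2m cap; these constants are assumptions, not the kubelet's exact values:

    package main

    import (
        "fmt"
        "time"
    )

    // expBackoff returns the delay before retry attempt n (0-based),
    // doubling from initial and saturating at max. The constants passed
    // in main are illustrative, not the kubelet's exact values.
    func expBackoff(n int, initial, max time.Duration) time.Duration {
        d := initial
        for i := 0; i < n; i++ {
            d *= 2
            if d >= max {
                return max
            }
        }
        return d
    }

    func main() {
        // With the assumed constants, attempt 4 waits 8s, matching the
        // durationBeforeRetry seen in the log.
        for n := 0; n < 6; n++ {
            fmt.Printf("attempt %d: wait %v\n", n, expBackoff(n, 500*time.Millisecond, 2*time.Minute))
        }
    }

With these assumed constants the fifth attempt waits 8s, which lines up with the retry scheduled for 21:31:30 above; once the driver's registration socket reappears under /var/lib/kubelet/plugins_registry, a later retry can typically complete the TearDown.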
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.200820 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/297c0e39-f2c1-4708-a2ba-cb9576086924-host\") pod \"node-ca-r8bdz\" (UID: \"297c0e39-f2c1-4708-a2ba-cb9576086924\") " pod="openshift-image-registry/node-ca-r8bdz"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.200866 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/297c0e39-f2c1-4708-a2ba-cb9576086924-serviceca\") pod \"node-ca-r8bdz\" (UID: \"297c0e39-f2c1-4708-a2ba-cb9576086924\") " pod="openshift-image-registry/node-ca-r8bdz"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.200918 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d55hg\" (UniqueName: \"kubernetes.io/projected/297c0e39-f2c1-4708-a2ba-cb9576086924-kube-api-access-d55hg\") pod \"node-ca-r8bdz\" (UID: \"297c0e39-f2c1-4708-a2ba-cb9576086924\") " pod="openshift-image-registry/node-ca-r8bdz"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.202729 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/297c0e39-f2c1-4708-a2ba-cb9576086924-serviceca\") pod \"node-ca-r8bdz\" (UID: \"297c0e39-f2c1-4708-a2ba-cb9576086924\") " pod="openshift-image-registry/node-ca-r8bdz"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.212523 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.225913 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.232515 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d55hg\" (UniqueName: \"kubernetes.io/projected/297c0e39-f2c1-4708-a2ba-cb9576086924-kube-api-access-d55hg\") pod \"node-ca-r8bdz\" (UID: \"297c0e39-f2c1-4708-a2ba-cb9576086924\") " pod="openshift-image-registry/node-ca-r8bdz" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.238441 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\
\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.256264 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.276377 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.276420 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.276432 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.276453 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.276469 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:22Z","lastTransitionTime":"2025-10-06T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.280140 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:
31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.302064 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.302145 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " 
pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.302193 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.302239 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.302250 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.302284 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.302299 5014 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.302365 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:30.302341926 +0000 UTC m=+35.595378670 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.302364 5014 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.302409 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:30.302400408 +0000 UTC m=+35.595437152 (durationBeforeRetry 8s).
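This failure and the Error detail just below both involve projected volumes: every kube-api-access-* mount is assembled from the pod's service-account token plus the kube-root-ca.crt configmap (and, on OpenShift, openshift-service-ca.crt), and SetUp cannot proceed until those source objects are registered in the kubelet's object cache. A sketch of the equivalent volume definition using the upstream k8s.io/api types; the volume name is taken from the log, while the token lifetime is illustrative:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        exp := int64(3607) // illustrative token lifetime in seconds

        vol := corev1.Volume{
            Name: "kube-api-access-s2dwl",
            VolumeSource: corev1.VolumeSource{
                Projected: &corev1.ProjectedVolumeSource{
                    Sources: []corev1.VolumeProjection{
                        // The service-account token, minted by the kubelet.
                        {ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
                            Path:              "token",
                            ExpirationSeconds: &exp,
                        }},
                        // SetUp blocks while this configmap is "not registered".
                        {ConfigMap: &corev1.ConfigMapProjection{
                            LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
                            Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
                        }},
                    },
                },
            },
        }
        fmt.Printf("%+v\n", vol)
    }

The real kube-api-access volume also projects the pod namespace via the downward API; that source is omitted here for brevity.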
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.302478 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.302506 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.302530 5014 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.302604 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:30.302581335 +0000 UTC m=+35.595618109 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.302722 5014 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.302767 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:30.302753031 +0000 UTC m=+35.595789805 (durationBeforeRetry 8s).
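Beyond the secret mount detailed below, nearly every record in this window shares one root cause: the network-node-identity webhook at https://127.0.0.1:9743 presents a serving certificate whose NotAfter, 2025-08-24T17:21:41Z, is weeks behind the node clock, so each status patch dies in the TLS handshake. The error text mirrors the standard x509 validity window test, which reduces to the comparison below; a minimal stdlib sketch, not the verifier's full chain validation:

    package main

    import (
        "crypto/x509"
        "fmt"
        "time"
    )

    // checkValidity reproduces the NotBefore/NotAfter window test behind
    // "certificate has expired or is not yet valid" errors.
    func checkValidity(cert *x509.Certificate, now time.Time) error {
        if now.Before(cert.NotBefore) {
            return fmt.Errorf("x509: certificate is not yet valid: current time %s is before %s",
                now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
        }
        if now.After(cert.NotAfter) {
            return fmt.Errorf("x509: certificate has expired: current time %s is after %s",
                now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
        }
        return nil
    }

    func main() {
        // NotAfter and the clock reading are taken from the log;
        // NotBefore is assumed, since the log only shows the expiry.
        cert := &x509.Certificate{
            NotBefore: time.Date(2024, 8, 24, 17, 21, 41, 0, time.UTC),
            NotAfter:  time.Date(2025, 8, 24, 17, 21, 41, 0, time.UTC),
        }
        now := time.Date(2025, 10, 6, 21, 31, 22, 0, time.UTC)
        fmt.Println(checkValidity(cert, now))
    }

Until that certificate is rotated or the clock is corrected, the kubelet can only keep retrying; the containers keep running, but their reported status goes stale.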
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.310348 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092
272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.331214 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.349503 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.362579 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.379299 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.379339 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.379352 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.379373 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.379387 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:22Z","lastTransitionTime":"2025-10-06T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.380047 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"l
astState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.393311 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.409845 5014 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-image-registry/node-ca-r8bdz"
Oct 06 21:31:22 crc kubenswrapper[5014]: W1006 21:31:22.426187 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod297c0e39_f2c1_4708_a2ba_cb9576086924.slice/crio-b497e0d4172390fcbd773b55f1b55489d96c867235dede58e31150b3272fb9a2 WatchSource:0}: Error finding container b497e0d4172390fcbd773b55f1b55489d96c867235dede58e31150b3272fb9a2: Status 404 returned error can't find the container with id b497e0d4172390fcbd773b55f1b55489d96c867235dede58e31150b3272fb9a2
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.481833 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.481876 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.481890 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.481909 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.481921 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:22Z","lastTransitionTime":"2025-10-06T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.484130 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.484155 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.484223 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.484167 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.484320 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 06 21:31:22 crc kubenswrapper[5014]: E1006 21:31:22.484405 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.585877 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.585915 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.585927 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.585944 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.585957 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:22Z","lastTransitionTime":"2025-10-06T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.687975 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.688335 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.688347 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.688364 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.688376 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:22Z","lastTransitionTime":"2025-10-06T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.747365 5014 generic.go:334] "Generic (PLEG): container finished" podID="f5c94fe2-2a02-4515-bc83-234827e59e4f" containerID="f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408" exitCode=0
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.747561 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" event={"ID":"f5c94fe2-2a02-4515-bc83-234827e59e4f","Type":"ContainerDied","Data":"f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408"}
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.754914 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-r8bdz" event={"ID":"297c0e39-f2c1-4708-a2ba-cb9576086924","Type":"ContainerStarted","Data":"b497e0d4172390fcbd773b55f1b55489d96c867235dede58e31150b3272fb9a2"}
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.768259 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set 
denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.781773 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.792024 5014 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.792071 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.792089 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.792112 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.792129 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:22Z","lastTransitionTime":"2025-10-06T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.801764 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.821139 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z 
is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.837337 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.859267 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.876751 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.889357 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.895337 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.895392 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.895407 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.895427 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.895439 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:22Z","lastTransitionTime":"2025-10-06T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.903416 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.
io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.932739 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.949345 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.969727 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:22 crc kubenswrapper[5014]: I1006 21:31:22.983819 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.002647 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.003405 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.003420 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.003438 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.003451 5014 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:23Z","lastTransitionTime":"2025-10-06T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.007482 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f
8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-
release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:23Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.024838 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:23Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.107646 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.107675 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.107684 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.107697 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.107706 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:23Z","lastTransitionTime":"2025-10-06T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.212499 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.212534 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.212548 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.212567 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.212583 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:23Z","lastTransitionTime":"2025-10-06T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.315228 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.315280 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.315294 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.315314 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.315329 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:23Z","lastTransitionTime":"2025-10-06T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.418363 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.418445 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.418466 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.418496 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.418519 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:23Z","lastTransitionTime":"2025-10-06T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.521743 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.521787 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.521795 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.521812 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.522116 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:23Z","lastTransitionTime":"2025-10-06T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.625593 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.625709 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.625732 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.625767 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.625789 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:23Z","lastTransitionTime":"2025-10-06T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.729652 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.729709 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.729725 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.729748 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.729765 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:23Z","lastTransitionTime":"2025-10-06T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.763896 5014 generic.go:334] "Generic (PLEG): container finished" podID="f5c94fe2-2a02-4515-bc83-234827e59e4f" containerID="38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09" exitCode=0 Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.764001 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" event={"ID":"f5c94fe2-2a02-4515-bc83-234827e59e4f","Type":"ContainerDied","Data":"38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09"} Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.767612 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-r8bdz" event={"ID":"297c0e39-f2c1-4708-a2ba-cb9576086924","Type":"ContainerStarted","Data":"ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046"} Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.775359 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerStarted","Data":"a66ed83f428f7cbcc1d97051cd5f846b8b20843f061a6c4bfbaf3e7e0222f570"} Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.775979 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.776105 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.776337 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.790662 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:23Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.817131 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:23Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.864894 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.864972 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.864992 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.865020 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.865049 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:23Z","lastTransitionTime":"2025-10-06T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.868925 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.869935 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.869863 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:23Z 
is after 2025-08-24T17:21:41Z" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.886601 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:23Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.904442 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:23Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.919148 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:23Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.932541 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-06T21:31:23Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.949272 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:23Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.968150 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.968212 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.968226 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.968247 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.968283 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:23Z","lastTransitionTime":"2025-10-06T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.971481 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:23Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:23 crc kubenswrapper[5014]: I1006 21:31:23.989530 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when 
the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:23Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.006753 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.021696 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.050244 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.067021 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.071509 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.071549 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.071564 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.071584 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.071600 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:24Z","lastTransitionTime":"2025-10-06T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.083503 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.104526 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.122138 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.145053 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.161905 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.174044 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.174097 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.174178 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.174207 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.174227 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:24Z","lastTransitionTime":"2025-10-06T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.174772 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.193160 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.218696 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.234367 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.251753 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.266689 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.277200 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.277271 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.277290 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.277316 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.277332 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:24Z","lastTransitionTime":"2025-10-06T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.284804 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f
7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.301055 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.314097 5014 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.329330 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.352065 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a66ed83f428f7cbcc1d97051cd5f846b8b20843f061a6c4bfbaf3e7e0222f570\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.379939 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.379994 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.380014 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.380039 5014 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.380057 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:24Z","lastTransitionTime":"2025-10-06T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.483504 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.483509 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:24 crc kubenswrapper[5014]: E1006 21:31:24.483881 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.483554 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.483903 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:24 crc kubenswrapper[5014]: E1006 21:31:24.484060 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.484097 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.484137 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.483526 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.484161 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:24Z","lastTransitionTime":"2025-10-06T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:24 crc kubenswrapper[5014]: E1006 21:31:24.484369 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.587556 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.587973 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.588102 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.588228 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.588353 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:24Z","lastTransitionTime":"2025-10-06T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.691963 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.692027 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.692044 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.692067 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.692084 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:24Z","lastTransitionTime":"2025-10-06T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.788656 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" event={"ID":"f5c94fe2-2a02-4515-bc83-234827e59e4f","Type":"ContainerStarted","Data":"723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1"} Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.794917 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.794971 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.794990 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.795015 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.795034 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:24Z","lastTransitionTime":"2025-10-06T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.816073 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set 
denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.838080 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.872875 5014 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a66ed83f428f7cbcc1d97051cd5f846b8b20843f061a6c4bfbaf3e7e0222f570\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.889584 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.898080 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.898158 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.898184 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.898218 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.898253 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:24Z","lastTransitionTime":"2025-10-06T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.911220 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.927930 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.939818 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.953650 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.969144 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:24 crc kubenswrapper[5014]: I1006 21:31:24.984014 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resour
ces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.000847 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.000834 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:24Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.000971 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.001128 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.001165 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.001188 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:25Z","lastTransitionTime":"2025-10-06T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.014040 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.045513 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.064900 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.084056 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.104447 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.104511 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.104529 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.104555 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.104573 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:25Z","lastTransitionTime":"2025-10-06T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.207758 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.207816 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.207836 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.207861 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.207878 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:25Z","lastTransitionTime":"2025-10-06T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.311666 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.311725 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.311752 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.311784 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.311807 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:25Z","lastTransitionTime":"2025-10-06T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.415184 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.415238 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.415256 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.415283 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.415303 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:25Z","lastTransitionTime":"2025-10-06T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.499164 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set 
denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.510351 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.517422 5014 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.517467 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.517479 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.517496 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.517510 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:25Z","lastTransitionTime":"2025-10-06T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.532963 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a66ed83f428f7cbcc1d97051cd5f846b8b20843f
061a6c4bfbaf3e7e0222f570\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.565009 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.609838 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.620380 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.620416 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.620427 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.620442 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.620452 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:25Z","lastTransitionTime":"2025-10-06T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.634327 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.646175 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.664969 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.680335 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.692173 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.707240 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.716808 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.723128 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.723168 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.723181 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.723199 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.723211 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:25Z","lastTransitionTime":"2025-10-06T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.733245 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.743636 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.753509 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.825573 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.825689 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.825708 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.825732 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.825756 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:25Z","lastTransitionTime":"2025-10-06T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.928922 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.928998 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.929017 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.929041 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:25 crc kubenswrapper[5014]: I1006 21:31:25.929058 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:25Z","lastTransitionTime":"2025-10-06T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.032145 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.032228 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.032248 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.032273 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.032290 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:26Z","lastTransitionTime":"2025-10-06T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.135012 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.135082 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.135106 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.135137 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.135159 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:26Z","lastTransitionTime":"2025-10-06T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.238232 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.238287 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.238305 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.238328 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.238345 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:26Z","lastTransitionTime":"2025-10-06T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.341441 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.341508 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.341531 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.341562 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.341584 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:26Z","lastTransitionTime":"2025-10-06T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.443990 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.444057 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.444081 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.444115 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.444136 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:26Z","lastTransitionTime":"2025-10-06T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.484108 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.484212 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.484128 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:26 crc kubenswrapper[5014]: E1006 21:31:26.484285 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:26 crc kubenswrapper[5014]: E1006 21:31:26.484399 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:26 crc kubenswrapper[5014]: E1006 21:31:26.484664 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.546813 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.546871 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.546888 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.546913 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.546930 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:26Z","lastTransitionTime":"2025-10-06T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.650197 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.650257 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.650275 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.650298 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.650315 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:26Z","lastTransitionTime":"2025-10-06T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.752861 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.752925 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.752944 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.752969 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.752989 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:26Z","lastTransitionTime":"2025-10-06T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.798146 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/0.log" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.801939 5014 generic.go:334] "Generic (PLEG): container finished" podID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerID="a66ed83f428f7cbcc1d97051cd5f846b8b20843f061a6c4bfbaf3e7e0222f570" exitCode=1 Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.801991 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerDied","Data":"a66ed83f428f7cbcc1d97051cd5f846b8b20843f061a6c4bfbaf3e7e0222f570"} Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.803104 5014 scope.go:117] "RemoveContainer" containerID="a66ed83f428f7cbcc1d97051cd5f846b8b20843f061a6c4bfbaf3e7e0222f570" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.825538 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set 
denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:26Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.847248 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:26Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.855990 5014 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.856058 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.856079 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.856104 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.856123 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:26Z","lastTransitionTime":"2025-10-06T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.868295 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:26Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.894280 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:26Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.928814 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a66ed83f428f7cbcc1d97051cd5f846b8b20843f
061a6c4bfbaf3e7e0222f570\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a66ed83f428f7cbcc1d97051cd5f846b8b20843f061a6c4bfbaf3e7e0222f570\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:26Z\\\",\\\"message\\\":\\\"ing reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1006 21:31:26.066965 6277 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1006 21:31:26.067251 6277 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1006 21:31:26.067388 6277 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1006 21:31:26.067481 6277 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1006 21:31:26.068105 6277 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1006 21:31:26.068127 6277 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1006 21:31:26.068152 6277 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1006 21:31:26.068207 6277 handler.go:208] Removed *v1.Node event handler 2\\\\nI1006 21:31:26.068219 6277 factory.go:656] Stopping watch factory\\\\nI1006 21:31:26.068236 6277 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1006 21:31:26.068248 6277 ovnkube.go:599] Stopped 
ovnkube\\\\nI10\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:26Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.949578 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:26Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.959320 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.959372 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.959389 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.959411 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.959429 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:26Z","lastTransitionTime":"2025-10-06T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.970102 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:26Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:26 crc kubenswrapper[5014]: I1006 21:31:26.993360 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-10-06T21:31:26Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.018091 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resour
ces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:27Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.043124 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:27Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.062530 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:27Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.062873 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.062900 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.062916 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.062939 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.062967 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:27Z","lastTransitionTime":"2025-10-06T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.094315 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:27Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.113944 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:27Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.131119 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:27Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.149851 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:27Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.167735 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.167777 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.167793 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.167814 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.167833 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:27Z","lastTransitionTime":"2025-10-06T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.273365 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.273403 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.273418 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.273440 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.273456 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:27Z","lastTransitionTime":"2025-10-06T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.375863 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.375913 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.375930 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.375952 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.375968 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:27Z","lastTransitionTime":"2025-10-06T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.479509 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.479563 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.479580 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.479604 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.479653 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:27Z","lastTransitionTime":"2025-10-06T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.582741 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.582785 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.582801 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.582823 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.582840 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:27Z","lastTransitionTime":"2025-10-06T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.686217 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.686256 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.686268 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.686285 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.686296 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:27Z","lastTransitionTime":"2025-10-06T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.793557 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.793674 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.793705 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.793738 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.793763 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:27Z","lastTransitionTime":"2025-10-06T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.810479 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/0.log" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.814529 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerStarted","Data":"db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e"} Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.815813 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.838076 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:27Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.859143 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:27Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.896880 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.896963 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.896988 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.897021 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.897047 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:27Z","lastTransitionTime":"2025-10-06T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.897907 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a66ed83f428f7cbcc1d97051cd5f846b8b20843f061a6c4bfbaf3e7e0222f570\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:26Z\\\",\\\"message\\\":\\\"ing reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1006 21:31:26.066965 6277 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1006 21:31:26.067251 6277 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1006 21:31:26.067388 6277 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1006 21:31:26.067481 6277 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1006 21:31:26.068105 6277 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1006 21:31:26.068127 6277 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1006 21:31:26.068152 6277 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1006 21:31:26.068207 6277 handler.go:208] Removed *v1.Node event handler 2\\\\nI1006 21:31:26.068219 6277 factory.go:656] Stopping watch factory\\\\nI1006 21:31:26.068236 6277 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1006 21:31:26.068248 6277 ovnkube.go:599] Stopped 
ovnkube\\\\nI10\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:27Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.923948 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:27Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.950320 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:27Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.968270 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:27Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:27 crc kubenswrapper[5014]: I1006 21:31:27.984128 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:27Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.000301 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.000339 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.000350 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.000367 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.000379 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:28Z","lastTransitionTime":"2025-10-06T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.002993 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.023001 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":
{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.051794 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646f
b68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.068313 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.080667 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.094227 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.102559 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.102591 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.102599 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.102613 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.102634 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:28Z","lastTransitionTime":"2025-10-06T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.112074 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f
7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.134276 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.205414 5014 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.205491 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.205509 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.205535 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.205553 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:28Z","lastTransitionTime":"2025-10-06T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.309333 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.309393 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.309411 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.309437 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.309455 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:28Z","lastTransitionTime":"2025-10-06T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.412584 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.412665 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.412683 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.412705 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.412722 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:28Z","lastTransitionTime":"2025-10-06T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.483572 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.483694 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.483572 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:28 crc kubenswrapper[5014]: E1006 21:31:28.483792 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:28 crc kubenswrapper[5014]: E1006 21:31:28.483933 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:28 crc kubenswrapper[5014]: E1006 21:31:28.484060 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.515875 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.515973 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.516000 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.516027 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.516049 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:28Z","lastTransitionTime":"2025-10-06T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.627095 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.627154 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.627175 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.627204 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.627222 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:28Z","lastTransitionTime":"2025-10-06T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.685189 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245"] Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.686042 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.689057 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.690575 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.712391 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set 
denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.730414 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.730486 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.730835 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.730869 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.730892 5014 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:28Z","lastTransitionTime":"2025-10-06T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.733554 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.752853 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.767530 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3f43a657-4080-406d-8736-6520c72d1a97-env-overrides\") pod \"ovnkube-control-plane-749d76644c-bj245\" (UID: \"3f43a657-4080-406d-8736-6520c72d1a97\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.767677 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3f43a657-4080-406d-8736-6520c72d1a97-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-bj245\" (UID: \"3f43a657-4080-406d-8736-6520c72d1a97\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.767742 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3f43a657-4080-406d-8736-6520c72d1a97-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-bj245\" (UID: \"3f43a657-4080-406d-8736-6520c72d1a97\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.767895 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59sjn\" (UniqueName: \"kubernetes.io/projected/3f43a657-4080-406d-8736-6520c72d1a97-kube-api-access-59sjn\") pod \"ovnkube-control-plane-749d76644c-bj245\" (UID: \"3f43a657-4080-406d-8736-6520c72d1a97\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.773816 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.804669 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a66ed83f428f7cbcc1d97051cd5f846b8b20843f061a6c4bfbaf3e7e0222f570\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:26Z\\\",\\\"message\\\":\\\"ing reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1006 21:31:26.066965 6277 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1006 21:31:26.067251 6277 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1006 21:31:26.067388 6277 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1006 21:31:26.067481 6277 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1006 21:31:26.068105 6277 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1006 21:31:26.068127 6277 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1006 21:31:26.068152 6277 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1006 21:31:26.068207 6277 handler.go:208] Removed *v1.Node event handler 2\\\\nI1006 21:31:26.068219 6277 factory.go:656] Stopping watch factory\\\\nI1006 21:31:26.068236 6277 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1006 21:31:26.068248 6277 ovnkube.go:599] Stopped 
ovnkube\\\\nI10\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.820917 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/1.log" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.822173 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/0.log" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.826557 5014 generic.go:334] "Generic (PLEG): container finished" podID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerID="db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e" exitCode=1 Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.826611 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerDied","Data":"db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e"} Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.826770 5014 scope.go:117] "RemoveContainer" containerID="a66ed83f428f7cbcc1d97051cd5f846b8b20843f061a6c4bfbaf3e7e0222f570" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.828070 5014 scope.go:117] "RemoveContainer" containerID="db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e" Oct 06 21:31:28 crc kubenswrapper[5014]: E1006 21:31:28.828567 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.836726 5014 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 
21:31:28.840682 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.840803 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.840823 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.840847 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.840863 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:28Z","lastTransitionTime":"2025-10-06T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.860678 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.869291 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3f43a657-4080-406d-8736-6520c72d1a97-env-overrides\") pod \"ovnkube-control-plane-749d76644c-bj245\" (UID: \"3f43a657-4080-406d-8736-6520c72d1a97\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.869385 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3f43a657-4080-406d-8736-6520c72d1a97-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-bj245\" (UID: \"3f43a657-4080-406d-8736-6520c72d1a97\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.869423 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3f43a657-4080-406d-8736-6520c72d1a97-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-bj245\" (UID: \"3f43a657-4080-406d-8736-6520c72d1a97\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.869548 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59sjn\" (UniqueName: \"kubernetes.io/projected/3f43a657-4080-406d-8736-6520c72d1a97-kube-api-access-59sjn\") pod \"ovnkube-control-plane-749d76644c-bj245\" (UID: \"3f43a657-4080-406d-8736-6520c72d1a97\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.871051 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3f43a657-4080-406d-8736-6520c72d1a97-env-overrides\") pod \"ovnkube-control-plane-749d76644c-bj245\" (UID: \"3f43a657-4080-406d-8736-6520c72d1a97\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.871499 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3f43a657-4080-406d-8736-6520c72d1a97-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-bj245\" (UID: \"3f43a657-4080-406d-8736-6520c72d1a97\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.881674 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3f43a657-4080-406d-8736-6520c72d1a97-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-bj245\" (UID: \"3f43a657-4080-406d-8736-6520c72d1a97\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.890670 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.908551 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59sjn\" (UniqueName: \"kubernetes.io/projected/3f43a657-4080-406d-8736-6520c72d1a97-kube-api-access-59sjn\") pod \"ovnkube-control-plane-749d76644c-bj245\" (UID: \"3f43a657-4080-406d-8736-6520c72d1a97\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.910788 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.928759 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.946422 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.946477 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.946502 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.946535 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.946554 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:28Z","lastTransitionTime":"2025-10-06T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.949127 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:28 crc kubenswrapper[5014]: I1006 21:31:28.972229 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-10-06T21:31:28Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.006011 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.008673 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036
cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: W1006 21:31:29.026306 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f43a657_4080_406d_8736_6520c72d1a97.slice/crio-0f99e4e6e0dc09df85119e0db066895b0cc11f1cc88b0c78a418b341fa8d861d WatchSource:0}: Error finding container 0f99e4e6e0dc09df85119e0db066895b0cc11f1cc88b0c78a418b341fa8d861d: Status 404 returned error can't find the container with id 0f99e4e6e0dc09df85119e0db066895b0cc11f1cc88b0c78a418b341fa8d861d Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.033404 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.050013 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.050096 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.050126 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.050161 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.050181 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:29Z","lastTransitionTime":"2025-10-06T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.057242 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.075548 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.114086 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.133720 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.153203 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.154862 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.154919 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.154944 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.154975 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.155000 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:29Z","lastTransitionTime":"2025-10-06T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.174009 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.200036 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.219678 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.234759 5014 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.254226 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.258912 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.258961 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.258982 5014 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.259006 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.259023 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:29Z","lastTransitionTime":"2025-10-06T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.284430 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db5f26a35b2115d078a6f9f29489588e27cc5e90
52dd0256a786347950f66d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a66ed83f428f7cbcc1d97051cd5f846b8b20843f061a6c4bfbaf3e7e0222f570\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:26Z\\\",\\\"message\\\":\\\"ing reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1006 21:31:26.066965 6277 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1006 21:31:26.067251 6277 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1006 21:31:26.067388 6277 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1006 21:31:26.067481 6277 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1006 21:31:26.068105 6277 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1006 21:31:26.068127 6277 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1006 21:31:26.068152 6277 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1006 21:31:26.068207 6277 handler.go:208] Removed *v1.Node event handler 2\\\\nI1006 21:31:26.068219 6277 factory.go:656] Stopping watch factory\\\\nI1006 21:31:26.068236 6277 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1006 21:31:26.068248 6277 ovnkube.go:599] Stopped ovnkube\\\\nI10\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:27Z\\\",\\\"message\\\":\\\"rable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1006 21:31:27.887087 6437 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1006 21:31:27.887164 6437 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-console-operator/metrics]} name:Service_openshift-console-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true 
skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.88:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ebd4748e-0473-49fb-88ad-83dbb221791a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":
\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.301800 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.319242 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.344236 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.362188 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.362231 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.362250 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.362274 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.362292 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:29Z","lastTransitionTime":"2025-10-06T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.363295 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.387851 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.409021 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.429116 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.466181 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.466229 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.466246 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.466271 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.466287 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:29Z","lastTransitionTime":"2025-10-06T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.569385 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.569463 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.569487 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.569521 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.569546 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:29Z","lastTransitionTime":"2025-10-06T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.672592 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.672680 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.672700 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.672734 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.672759 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:29Z","lastTransitionTime":"2025-10-06T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.775393 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.775446 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.775464 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.775487 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.775508 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:29Z","lastTransitionTime":"2025-10-06T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.812105 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.833598 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/1.log" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.842123 5014 scope.go:117] "RemoveContainer" containerID="db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e" Oct 06 21:31:29 crc kubenswrapper[5014]: E1006 21:31:29.843137 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.857044 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.857520 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" event={"ID":"3f43a657-4080-406d-8736-6520c72d1a97","Type":"ContainerStarted","Data":"51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a"} Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.857923 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" event={"ID":"3f43a657-4080-406d-8736-6520c72d1a97","Type":"ContainerStarted","Data":"9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3"} Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.857954 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" event={"ID":"3f43a657-4080-406d-8736-6520c72d1a97","Type":"ContainerStarted","Data":"0f99e4e6e0dc09df85119e0db066895b0cc11f1cc88b0c78a418b341fa8d861d"} Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.879113 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.879169 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.879186 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.879210 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.879231 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:29Z","lastTransitionTime":"2025-10-06T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.887184 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.906170 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.922504 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.946807 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.963789 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.981966 5014 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.982946 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.983013 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.983033 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.983056 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:29 crc kubenswrapper[5014]: I1006 21:31:29.983074 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:29Z","lastTransitionTime":"2025-10-06T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.000981 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:29Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.026985 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"image
ID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":t
rue,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a66ed83f428f7cbcc1d97051cd5f846b8b20843f061a6c4bfbaf3e7e0222f570\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:26Z\\\",\\\"message\\\":\\\"ing reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1006 21:31:26.066965 6277 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1006 21:31:26.067251 6277 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1006 21:31:26.067388 6277 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1006 21:31:26.067481 6277 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1006 21:31:26.068105 6277 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1006 21:31:26.068127 6277 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1006 21:31:26.068152 6277 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1006 21:31:26.068207 6277 handler.go:208] Removed *v1.Node event handler 2\\\\nI1006 21:31:26.068219 6277 factory.go:656] Stopping watch factory\\\\nI1006 21:31:26.068236 6277 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1006 21:31:26.068248 6277 ovnkube.go:599] Stopped ovnkube\\\\nI10\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:27Z\\\",\\\"message\\\":\\\"rable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1006 21:31:27.887087 6437 transact.go:42] Configuring OVN: 
[{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1006 21:31:27.887164 6437 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-console-operator/metrics]} name:Service_openshift-console-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.88:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ebd4748e-0473-49fb-88ad-83dbb221791a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.048478 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.070802 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.086504 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.086568 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.086592 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.086658 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.086681 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:30Z","lastTransitionTime":"2025-10-06T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.091924 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.106943 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.125555 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.148839 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.165740 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.185323 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.190118 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.190182 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.190199 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.190224 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.190242 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:30Z","lastTransitionTime":"2025-10-06T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.207739 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.240103 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:27Z\\\",\\\"message\\\":\\\"rable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1006 21:31:27.887087 6437 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1006 21:31:27.887164 6437 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-console-operator/metrics]} name:Service_openshift-console-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.88:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ebd4748e-0473-49fb-88ad-83dbb221791a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.243509 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-chcf6"] Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.244609 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.244950 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.262557 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-re
sources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.284415 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.284612 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:31:46.284575049 +0000 UTC m=+51.577611823 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.284710 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs\") pod \"network-metrics-daemon-chcf6\" (UID: \"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\") " pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.284865 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhgks\" (UniqueName: \"kubernetes.io/projected/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-kube-api-access-xhgks\") pod \"network-metrics-daemon-chcf6\" (UID: \"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\") " pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.285853 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.292430 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.292482 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.292499 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.292523 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.292540 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:30Z","lastTransitionTime":"2025-10-06T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.305348 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.323005 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.343377 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.368715 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.385156 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podI
P\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.386110 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.386312 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs\") pod \"network-metrics-daemon-chcf6\" (UID: \"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\") " pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.386485 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhgks\" (UniqueName: \"kubernetes.io/projected/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-kube-api-access-xhgks\") pod \"network-metrics-daemon-chcf6\" (UID: \"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\") " pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.386728 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.386401 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.386955 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.386985 5014 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.387024 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.387096 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 06 21:31:30 crc 
kubenswrapper[5014]: E1006 21:31:30.387132 5014 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.386475 5014 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.387152 5014 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.387105 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:46.387039284 +0000 UTC m=+51.680076048 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.387336 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:46.387308053 +0000 UTC m=+51.680344827 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.387407 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:46.387393167 +0000 UTC m=+51.680429941 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.387431 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs podName:e4dbffab-5f6a-4ba5-b0c3-68e7e8840621 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:30.887421408 +0000 UTC m=+36.180458182 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs") pod "network-metrics-daemon-chcf6" (UID: "e4dbffab-5f6a-4ba5-b0c3-68e7e8840621") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.386914 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.388394 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.388500 5014 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.388859 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:46.388838286 +0000 UTC m=+51.681875060 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.395909 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.395990 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.396006 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.396024 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.396035 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:30Z","lastTransitionTime":"2025-10-06T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.417307 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.427249 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhgks\" (UniqueName: \"kubernetes.io/projected/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-kube-api-access-xhgks\") pod \"network-metrics-daemon-chcf6\" (UID: \"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\") " pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.441595 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.459985 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.477470 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.483837 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.483837 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.484047 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.484004 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.484125 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.484335 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.499076 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.499407 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.499558 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.499827 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.500055 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:30Z","lastTransitionTime":"2025-10-06T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.501247 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.516816 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.534126 5014 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.558975 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:27Z\\\",\\\"message\\\":\\\"rable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1006 21:31:27.887087 6437 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1006 21:31:27.887164 6437 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-console-operator/metrics]} name:Service_openshift-console-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.88:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ebd4748e-0473-49fb-88ad-83dbb221791a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.576489 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.592219 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.603357 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.603431 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.603486 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.603519 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.603543 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:30Z","lastTransitionTime":"2025-10-06T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.639804 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.661724 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.680566 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.700993 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.705955 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.705987 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.705999 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.706016 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.706028 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:30Z","lastTransitionTime":"2025-10-06T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.718227 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":
\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.737825 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.750644 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.764132 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.777041 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.809164 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.809221 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.809238 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.809263 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.809281 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:30Z","lastTransitionTime":"2025-10-06T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.813941 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.832792 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.855658 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\
\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] 
issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.873302 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.894173 5014 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs\") pod \"network-metrics-daemon-chcf6\" (UID: \"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\") " pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.894387 5014 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.894462 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs podName:e4dbffab-5f6a-4ba5-b0c3-68e7e8840621 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:31.894440587 +0000 UTC m=+37.187477351 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs") pod "network-metrics-daemon-chcf6" (UID: "e4dbffab-5f6a-4ba5-b0c3-68e7e8840621") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.912238 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.912316 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.912340 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.912369 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.912393 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:30Z","lastTransitionTime":"2025-10-06T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.914435 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.914498 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.914516 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.914548 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.914567 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:30Z","lastTransitionTime":"2025-10-06T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.935431 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 
2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.940699 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.940766 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.940788 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.940817 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.940838 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:30Z","lastTransitionTime":"2025-10-06T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.957136 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 
2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.963223 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.963274 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.963294 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.963319 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.963339 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:30Z","lastTransitionTime":"2025-10-06T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:30 crc kubenswrapper[5014]: E1006 21:31:30.984086 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:30Z is after 
2025-08-24T17:21:41Z" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.989208 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.989285 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.989305 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.989329 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:30 crc kubenswrapper[5014]: I1006 21:31:30.989348 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:30Z","lastTransitionTime":"2025-10-06T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:31 crc kubenswrapper[5014]: E1006 21:31:31.006157 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:31Z is after 
2025-08-24T17:21:41Z" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.011042 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.011091 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.011108 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.011131 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.011148 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:31Z","lastTransitionTime":"2025-10-06T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:31 crc kubenswrapper[5014]: E1006 21:31:31.027910 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:31Z is after 
2025-08-24T17:21:41Z" Oct 06 21:31:31 crc kubenswrapper[5014]: E1006 21:31:31.028189 5014 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.030235 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.030302 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.030322 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.030343 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.030363 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:31Z","lastTransitionTime":"2025-10-06T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.133694 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.133755 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.133777 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.133805 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.133826 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:31Z","lastTransitionTime":"2025-10-06T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.236828 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.236892 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.236915 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.236943 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.236965 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:31Z","lastTransitionTime":"2025-10-06T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.340506 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.340716 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.340752 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.340780 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.340800 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:31Z","lastTransitionTime":"2025-10-06T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.443718 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.443791 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.443810 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.443833 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.443850 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:31Z","lastTransitionTime":"2025-10-06T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.484367 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:31 crc kubenswrapper[5014]: E1006 21:31:31.484574 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.547606 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.547708 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.547726 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.547750 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.547767 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:31Z","lastTransitionTime":"2025-10-06T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.650869 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.650932 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.650949 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.650971 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.650988 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:31Z","lastTransitionTime":"2025-10-06T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.754136 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.754195 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.754214 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.754238 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.754255 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:31Z","lastTransitionTime":"2025-10-06T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.857512 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.857589 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.857613 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.857680 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.857702 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:31Z","lastTransitionTime":"2025-10-06T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.905094 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs\") pod \"network-metrics-daemon-chcf6\" (UID: \"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\") " pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:31 crc kubenswrapper[5014]: E1006 21:31:31.905303 5014 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:31:31 crc kubenswrapper[5014]: E1006 21:31:31.905417 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs podName:e4dbffab-5f6a-4ba5-b0c3-68e7e8840621 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:33.905386959 +0000 UTC m=+39.198423733 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs") pod "network-metrics-daemon-chcf6" (UID: "e4dbffab-5f6a-4ba5-b0c3-68e7e8840621") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.960728 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.960782 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.960812 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.960835 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:31 crc kubenswrapper[5014]: I1006 21:31:31.960852 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:31Z","lastTransitionTime":"2025-10-06T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.064111 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.064156 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.064176 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.064200 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.064218 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:32Z","lastTransitionTime":"2025-10-06T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.167199 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.167254 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.167268 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.167291 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.167306 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:32Z","lastTransitionTime":"2025-10-06T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.270500 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.270550 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.270564 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.270583 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.270595 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:32Z","lastTransitionTime":"2025-10-06T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.372896 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.372966 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.373003 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.373026 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.373044 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:32Z","lastTransitionTime":"2025-10-06T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.476475 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.476537 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.476559 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.476588 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.476609 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:32Z","lastTransitionTime":"2025-10-06T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.483870 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.483969 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:32 crc kubenswrapper[5014]: E1006 21:31:32.484049 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.484103 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:32 crc kubenswrapper[5014]: E1006 21:31:32.484286 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:32 crc kubenswrapper[5014]: E1006 21:31:32.484554 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.579682 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.579753 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.579776 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.579805 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.579824 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:32Z","lastTransitionTime":"2025-10-06T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.682761 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.682821 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.682842 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.682869 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.682892 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:32Z","lastTransitionTime":"2025-10-06T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.785567 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.785662 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.785687 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.785719 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.785744 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:32Z","lastTransitionTime":"2025-10-06T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.888500 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.888614 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.888686 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.888719 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.888739 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:32Z","lastTransitionTime":"2025-10-06T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.991900 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.991988 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.992006 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.992029 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:32 crc kubenswrapper[5014]: I1006 21:31:32.992047 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:32Z","lastTransitionTime":"2025-10-06T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.095057 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.095122 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.095144 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.095174 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.095194 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:33Z","lastTransitionTime":"2025-10-06T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.198279 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.198339 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.198361 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.198406 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.198424 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:33Z","lastTransitionTime":"2025-10-06T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.301009 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.301078 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.301099 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.301127 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.301149 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:33Z","lastTransitionTime":"2025-10-06T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.403758 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.403825 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.403843 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.403868 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.403886 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:33Z","lastTransitionTime":"2025-10-06T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.484268 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:33 crc kubenswrapper[5014]: E1006 21:31:33.484514 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.507077 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.507137 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.507164 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.507192 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.507215 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:33Z","lastTransitionTime":"2025-10-06T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.610246 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.610294 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.610311 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.610333 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.610349 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:33Z","lastTransitionTime":"2025-10-06T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.713462 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.713525 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.713545 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.713570 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.713588 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:33Z","lastTransitionTime":"2025-10-06T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.817036 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.817100 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.817117 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.817142 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.817160 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:33Z","lastTransitionTime":"2025-10-06T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.920748 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.920829 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.920897 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.920932 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.920953 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:33Z","lastTransitionTime":"2025-10-06T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:33 crc kubenswrapper[5014]: I1006 21:31:33.928363 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs\") pod \"network-metrics-daemon-chcf6\" (UID: \"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\") " pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:33 crc kubenswrapper[5014]: E1006 21:31:33.928517 5014 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:31:33 crc kubenswrapper[5014]: E1006 21:31:33.928649 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs podName:e4dbffab-5f6a-4ba5-b0c3-68e7e8840621 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:37.92859213 +0000 UTC m=+43.221628894 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs") pod "network-metrics-daemon-chcf6" (UID: "e4dbffab-5f6a-4ba5-b0c3-68e7e8840621") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.023957 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.024105 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.024125 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.024151 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.024198 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:34Z","lastTransitionTime":"2025-10-06T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.127823 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.127901 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.127920 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.127948 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.127965 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:34Z","lastTransitionTime":"2025-10-06T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.231960 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.232045 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.232073 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.232107 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.232132 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:34Z","lastTransitionTime":"2025-10-06T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.334806 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.334868 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.334885 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.334911 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.334928 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:34Z","lastTransitionTime":"2025-10-06T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.437408 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.437473 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.437495 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.437523 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.437546 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:34Z","lastTransitionTime":"2025-10-06T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.483981 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.484066 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.483977 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:34 crc kubenswrapper[5014]: E1006 21:31:34.484184 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:34 crc kubenswrapper[5014]: E1006 21:31:34.484316 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:34 crc kubenswrapper[5014]: E1006 21:31:34.484446 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.541288 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.541350 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.541368 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.541392 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.541410 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:34Z","lastTransitionTime":"2025-10-06T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.644256 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.644348 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.644371 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.644397 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.644414 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:34Z","lastTransitionTime":"2025-10-06T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.748469 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.748538 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.748555 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.748579 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.748595 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:34Z","lastTransitionTime":"2025-10-06T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.851660 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.851726 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.851746 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.851775 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.851793 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:34Z","lastTransitionTime":"2025-10-06T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.955040 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.955091 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.955108 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.955131 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:34 crc kubenswrapper[5014]: I1006 21:31:34.955148 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:34Z","lastTransitionTime":"2025-10-06T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.057712 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.057769 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.057789 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.057812 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.057829 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:35Z","lastTransitionTime":"2025-10-06T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.160587 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.160664 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.160682 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.160708 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.160726 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:35Z","lastTransitionTime":"2025-10-06T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.264082 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.264136 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.264153 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.264180 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.264203 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:35Z","lastTransitionTime":"2025-10-06T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.367034 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.367105 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.367124 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.367150 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.367168 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:35Z","lastTransitionTime":"2025-10-06T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.470278 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.470340 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.470356 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.470380 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.470397 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:35Z","lastTransitionTime":"2025-10-06T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.484045 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:35 crc kubenswrapper[5014]: E1006 21:31:35.484242 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.509010 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io
/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.528355 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.547513 5014 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.567026 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.580400 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.580472 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.580538 5014 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.580569 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.580599 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:35Z","lastTransitionTime":"2025-10-06T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.602939 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://db5f26a35b2115d078a6f9f29489588e27cc5e90
52dd0256a786347950f66d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:27Z\\\",\\\"message\\\":\\\"rable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1006 21:31:27.887087 6437 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1006 21:31:27.887164 6437 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-console-operator/metrics]} name:Service_openshift-console-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.88:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ebd4748e-0473-49fb-88ad-83dbb221791a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.619642 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.650404 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.671112 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egr
ess-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-
p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\
\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.687705 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.687752 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.687769 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.687799 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.687845 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:35Z","lastTransitionTime":"2025-10-06T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.690026 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.712034 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.737308 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.758215 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.775091 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.790949 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.791112 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.791144 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.791216 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.791255 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:35Z","lastTransitionTime":"2025-10-06T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.808327 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.829733 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.847969 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.865356 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:35Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.894064 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.894155 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.894217 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.894250 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.894313 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:35Z","lastTransitionTime":"2025-10-06T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.997254 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.997334 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.997402 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.997436 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:35 crc kubenswrapper[5014]: I1006 21:31:35.997460 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:35Z","lastTransitionTime":"2025-10-06T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.100661 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.100729 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.100753 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.100784 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.100806 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:36Z","lastTransitionTime":"2025-10-06T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.203582 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.203679 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.203705 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.203736 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.203759 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:36Z","lastTransitionTime":"2025-10-06T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.307006 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.307069 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.307086 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.307116 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.307134 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:36Z","lastTransitionTime":"2025-10-06T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.439656 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.439717 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.439735 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.439758 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.439776 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:36Z","lastTransitionTime":"2025-10-06T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.483978 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.484044 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.484064 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:36 crc kubenswrapper[5014]: E1006 21:31:36.484192 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:36 crc kubenswrapper[5014]: E1006 21:31:36.484369 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:36 crc kubenswrapper[5014]: E1006 21:31:36.484529 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.542785 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.542867 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.542886 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.542915 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.542941 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:36Z","lastTransitionTime":"2025-10-06T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.646152 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.646215 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.646235 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.646259 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.646274 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:36Z","lastTransitionTime":"2025-10-06T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.749813 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.749882 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.749899 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.749927 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.749946 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:36Z","lastTransitionTime":"2025-10-06T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.854745 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.854812 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.854854 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.854887 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.854908 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:36Z","lastTransitionTime":"2025-10-06T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.958716 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.958779 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.958797 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.958822 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:36 crc kubenswrapper[5014]: I1006 21:31:36.958840 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:36Z","lastTransitionTime":"2025-10-06T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.062161 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.062216 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.062236 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.062258 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.062274 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:37Z","lastTransitionTime":"2025-10-06T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.166308 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.166396 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.166422 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.166456 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.166481 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:37Z","lastTransitionTime":"2025-10-06T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.269921 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.269965 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.269976 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.269991 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.270003 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:37Z","lastTransitionTime":"2025-10-06T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.373241 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.373297 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.373314 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.373337 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.373356 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:37Z","lastTransitionTime":"2025-10-06T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.476884 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.476948 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.476965 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.476998 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.477015 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:37Z","lastTransitionTime":"2025-10-06T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.484305 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:37 crc kubenswrapper[5014]: E1006 21:31:37.484465 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.579613 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.579697 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.579716 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.579741 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.579760 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:37Z","lastTransitionTime":"2025-10-06T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.682964 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.683022 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.683040 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.683067 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.683084 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:37Z","lastTransitionTime":"2025-10-06T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.786300 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.786877 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.786994 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.787124 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.787269 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:37Z","lastTransitionTime":"2025-10-06T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.889665 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.889897 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.889972 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.890039 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.890099 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:37Z","lastTransitionTime":"2025-10-06T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.977308 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs\") pod \"network-metrics-daemon-chcf6\" (UID: \"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\") " pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:37 crc kubenswrapper[5014]: E1006 21:31:37.977603 5014 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:31:37 crc kubenswrapper[5014]: E1006 21:31:37.977746 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs podName:e4dbffab-5f6a-4ba5-b0c3-68e7e8840621 nodeName:}" failed. No retries permitted until 2025-10-06 21:31:45.977716144 +0000 UTC m=+51.270752908 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs") pod "network-metrics-daemon-chcf6" (UID: "e4dbffab-5f6a-4ba5-b0c3-68e7e8840621") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.993556 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.993821 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.993967 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.994112 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:37 crc kubenswrapper[5014]: I1006 21:31:37.994243 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:37Z","lastTransitionTime":"2025-10-06T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.097231 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.097309 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.097333 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.097365 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.097388 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:38Z","lastTransitionTime":"2025-10-06T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.200672 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.200750 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.200774 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.200806 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.200825 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:38Z","lastTransitionTime":"2025-10-06T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.304182 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.304265 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.304292 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.304325 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.304347 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:38Z","lastTransitionTime":"2025-10-06T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.407223 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.407309 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.407338 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.407376 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.407398 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:38Z","lastTransitionTime":"2025-10-06T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.483681 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.483697 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:38 crc kubenswrapper[5014]: E1006 21:31:38.484235 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:38 crc kubenswrapper[5014]: E1006 21:31:38.484118 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.483762 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:38 crc kubenswrapper[5014]: E1006 21:31:38.484452 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.510133 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.510346 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.510419 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.510456 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.510481 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:38Z","lastTransitionTime":"2025-10-06T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.613830 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.614145 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.614233 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.614332 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.614427 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:38Z","lastTransitionTime":"2025-10-06T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.717193 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.717258 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.717275 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.717311 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.717330 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:38Z","lastTransitionTime":"2025-10-06T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.820133 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.820587 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.820796 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.820960 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.821108 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:38Z","lastTransitionTime":"2025-10-06T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.923033 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.923328 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.923395 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.923515 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:38 crc kubenswrapper[5014]: I1006 21:31:38.923698 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:38Z","lastTransitionTime":"2025-10-06T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.027095 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.027160 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.027178 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.027208 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.027231 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:39Z","lastTransitionTime":"2025-10-06T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.130075 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.130113 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.130126 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.130153 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.130167 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:39Z","lastTransitionTime":"2025-10-06T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.233955 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.234442 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.234689 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.234922 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.235119 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:39Z","lastTransitionTime":"2025-10-06T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.338943 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.339010 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.339029 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.339056 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.339075 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:39Z","lastTransitionTime":"2025-10-06T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.442092 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.442132 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.442142 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.442156 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.442168 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:39Z","lastTransitionTime":"2025-10-06T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.483983 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6"
Oct 06 21:31:39 crc kubenswrapper[5014]: E1006 21:31:39.484485 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.545707 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.545773 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.545794 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.545820 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.545837 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:39Z","lastTransitionTime":"2025-10-06T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.649706 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.649786 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.649810 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.649842 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.649863 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:39Z","lastTransitionTime":"2025-10-06T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.753015 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.753075 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.753092 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.753115 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.753131 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:39Z","lastTransitionTime":"2025-10-06T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.858360 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.858457 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.858482 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.858552 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.858579 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:39Z","lastTransitionTime":"2025-10-06T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.961361 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.961800 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.961985 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.962224 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:39 crc kubenswrapper[5014]: I1006 21:31:39.962435 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:39Z","lastTransitionTime":"2025-10-06T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.065671 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.066068 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.066241 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.066440 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.066664 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:40Z","lastTransitionTime":"2025-10-06T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.169879 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.169954 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.169971 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.170001 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.170018 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:40Z","lastTransitionTime":"2025-10-06T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.272985 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.273378 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.273536 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.273731 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.273893 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:40Z","lastTransitionTime":"2025-10-06T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.376702 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.377081 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.377326 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.377545 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.377798 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:40Z","lastTransitionTime":"2025-10-06T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.480311 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.480372 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.480391 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.480416 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.480433 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:40Z","lastTransitionTime":"2025-10-06T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.483996 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.484019 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.484061 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 06 21:31:40 crc kubenswrapper[5014]: E1006 21:31:40.484127 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 06 21:31:40 crc kubenswrapper[5014]: E1006 21:31:40.484258 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 06 21:31:40 crc kubenswrapper[5014]: E1006 21:31:40.484301 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.583143 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.583179 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.583187 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.583217 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.583227 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:40Z","lastTransitionTime":"2025-10-06T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.686202 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.686270 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.686287 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.686312 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.686331 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:40Z","lastTransitionTime":"2025-10-06T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.789042 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.789107 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.789123 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.789148 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.789165 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:40Z","lastTransitionTime":"2025-10-06T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.892017 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.892063 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.892076 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.892092 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.892113 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:40Z","lastTransitionTime":"2025-10-06T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.994696 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.994762 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.994779 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.994809 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:40 crc kubenswrapper[5014]: I1006 21:31:40.994826 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:40Z","lastTransitionTime":"2025-10-06T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.098098 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.098149 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.098203 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.098225 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.098244 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:41Z","lastTransitionTime":"2025-10-06T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.201264 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.201324 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.201341 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.201366 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.201385 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:41Z","lastTransitionTime":"2025-10-06T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.278956 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.279013 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.279029 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.279052 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.279069 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:41Z","lastTransitionTime":"2025-10-06T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:41 crc kubenswrapper[5014]: E1006 21:31:41.303668 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:41Z is after 2025-08-24T17:21:41Z"
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.308059 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.308115 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.308123 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.308140 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.308364 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:41Z","lastTransitionTime":"2025-10-06T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:41 crc kubenswrapper[5014]: E1006 21:31:41.327007 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:41Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.331427 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.331472 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.331487 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.331499 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.331506 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:41Z","lastTransitionTime":"2025-10-06T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:41 crc kubenswrapper[5014]: E1006 21:31:41.350055 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:41Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.354824 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.354879 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.354902 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.354926 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.354945 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:41Z","lastTransitionTime":"2025-10-06T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:41 crc kubenswrapper[5014]: E1006 21:31:41.378656 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:41Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.383463 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.383557 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.383581 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.383604 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.383685 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:41Z","lastTransitionTime":"2025-10-06T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:41 crc kubenswrapper[5014]: E1006 21:31:41.404962 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:41Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:41 crc kubenswrapper[5014]: E1006 21:31:41.405183 5014 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.407192 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.407271 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.407289 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.407310 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.407326 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:41Z","lastTransitionTime":"2025-10-06T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.483958 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:41 crc kubenswrapper[5014]: E1006 21:31:41.484174 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.485262 5014 scope.go:117] "RemoveContainer" containerID="db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.510290 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.510352 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.510370 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.510395 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.510414 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:41Z","lastTransitionTime":"2025-10-06T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.613297 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.613748 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.613767 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.613792 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.613810 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:41Z","lastTransitionTime":"2025-10-06T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.718144 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.718187 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.718198 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.718216 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.718227 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:41Z","lastTransitionTime":"2025-10-06T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.821632 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.821670 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.821680 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.821697 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.821707 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:41Z","lastTransitionTime":"2025-10-06T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.912576 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/1.log" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.917788 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerStarted","Data":"a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46"} Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.919113 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.924981 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.925029 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.925046 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.925092 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.925111 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:41Z","lastTransitionTime":"2025-10-06T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.947888 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:41Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:41 crc kubenswrapper[5014]: I1006 21:31:41.978582 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:41Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.003460 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:41Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.021997 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.031107 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.031150 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.031162 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.031180 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.031197 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:42Z","lastTransitionTime":"2025-10-06T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.050216 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 
2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.066749 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.090324 5014 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:27Z\\\",\\\"message\\\":\\\"rable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1006 21:31:27.887087 6437 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1006 21:31:27.887164 6437 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-console-operator/metrics]} name:Service_openshift-console-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.88:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ebd4748e-0473-49fb-88ad-83dbb221791a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.102682 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.120706 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z"
Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.133519 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.133591 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.133614 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.133692 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.133715 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:42Z","lastTransitionTime":"2025-10-06T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.136105 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.145784 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.166375 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.190839 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.206439 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podI
P\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.226706 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.236754 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.236807 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.236822 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.236842 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.236855 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:42Z","lastTransitionTime":"2025-10-06T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.244582 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.256407 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.339139 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.339205 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.339222 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.339246 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.339266 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:42Z","lastTransitionTime":"2025-10-06T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.442957 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.443014 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.443031 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.443054 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.443071 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:42Z","lastTransitionTime":"2025-10-06T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.484543 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.484587 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.484555 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:42 crc kubenswrapper[5014]: E1006 21:31:42.484776 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:42 crc kubenswrapper[5014]: E1006 21:31:42.484913 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:42 crc kubenswrapper[5014]: E1006 21:31:42.485059 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.546000 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.546061 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.546088 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.546118 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.546141 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:42Z","lastTransitionTime":"2025-10-06T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.649318 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.649365 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.649382 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.649402 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.649419 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:42Z","lastTransitionTime":"2025-10-06T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.752398 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.752441 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.752459 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.752481 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.752496 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:42Z","lastTransitionTime":"2025-10-06T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.854917 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.854982 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.855005 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.855033 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.855055 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:42Z","lastTransitionTime":"2025-10-06T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.924781 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/2.log" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.925846 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/1.log" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.929921 5014 generic.go:334] "Generic (PLEG): container finished" podID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerID="a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46" exitCode=1 Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.929968 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerDied","Data":"a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46"} Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.930009 5014 scope.go:117] "RemoveContainer" containerID="db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.931099 5014 scope.go:117] "RemoveContainer" containerID="a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46" Oct 06 21:31:42 crc kubenswrapper[5014]: E1006 21:31:42.931389 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.950889 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.957203 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.957305 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.957363 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.957386 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.957439 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:42Z","lastTransitionTime":"2025-10-06T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:42 crc kubenswrapper[5014]: I1006 21:31:42.971866 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:42Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.006527 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db5f26a35b2115d078a6f9f29489588e27cc5e9052dd0256a786347950f66d9e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:27Z\\\",\\\"message\\\":\\\"rable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1006 21:31:27.887087 6437 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-image-registry/image-registry]} name:Service_openshift-image-registry/image-registry_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.93:5000:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {83c1e277-3d22-42ae-a355-f7a0ff0bd171}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1006 21:31:27.887164 6437 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-console-operator/metrics]} name:Service_openshift-console-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.88:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ebd4748e-0473-49fb-88ad-83dbb221791a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:42Z\\\",\\\"message\\\":\\\"6 21:31:42.503008 6641 
handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1006 21:31:42.503035 6641 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1006 21:31:42.503045 6641 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1006 21:31:42.503080 6641 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1006 21:31:42.503103 6641 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1006 21:31:42.503113 6641 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1006 21:31:42.503136 6641 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1006 21:31:42.503181 6641 factory.go:656] Stopping watch factory\\\\nI1006 21:31:42.503201 6641 ovnkube.go:599] Stopped ovnkube\\\\nI1006 21:31:42.503243 6641 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1006 21:31:42.503270 6641 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1006 21:31:42.503284 6641 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1006 21:31:42.503301 6641 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1006 21:31:42.503315 6641 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1006 21:31:42.503329 6641 handler.go:208] Removed *v1.Node event handler 2\\\\nI1006 21:31:42.503343 6641 handler.go:208] Removed *v1.Node event handler 7\\\\nI1006 21:31:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"
image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.026411 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.047214 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.060733 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.060789 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.060808 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.060837 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.060859 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:43Z","lastTransitionTime":"2025-10-06T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.068064 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.084096 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.104062 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.130224 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.147777 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podI
P\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.164059 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.164111 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.164313 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.164335 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.164370 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.164390 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:43Z","lastTransitionTime":"2025-10-06T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.196189 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f
1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.215253 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.232925 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.247551 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.268285 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.268346 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.268363 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.268389 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.268408 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:43Z","lastTransitionTime":"2025-10-06T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.268305 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.285244 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.370758 5014 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.370835 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.370861 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.370887 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.370905 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:43Z","lastTransitionTime":"2025-10-06T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.473520 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.473589 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.473607 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.473662 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.473681 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:43Z","lastTransitionTime":"2025-10-06T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.484247 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:43 crc kubenswrapper[5014]: E1006 21:31:43.484458 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.576505 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.576585 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.576610 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.576677 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.576696 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:43Z","lastTransitionTime":"2025-10-06T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.680404 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.680463 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.680482 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.680506 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.680524 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:43Z","lastTransitionTime":"2025-10-06T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.783162 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.783223 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.783240 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.783263 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.783280 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:43Z","lastTransitionTime":"2025-10-06T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.886018 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.886077 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.886094 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.886121 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.886139 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:43Z","lastTransitionTime":"2025-10-06T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.937033 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/2.log"
Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.942517 5014 scope.go:117] "RemoveContainer" containerID="a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46"
Oct 06 21:31:43 crc kubenswrapper[5014]: E1006 21:31:43.942970 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6"
Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.965407 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" 
(2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.983195 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.989089 5014 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.989129 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.989146 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.989169 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:43 crc kubenswrapper[5014]: I1006 21:31:43.989187 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:43Z","lastTransitionTime":"2025-10-06T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.001728 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:43Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.020936 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.054021 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b9
79bfd274c73161d70e9f6f46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:42Z\\\",\\\"message\\\":\\\"6 21:31:42.503008 6641 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1006 21:31:42.503035 6641 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1006 21:31:42.503045 6641 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1006 21:31:42.503080 6641 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1006 21:31:42.503103 6641 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1006 21:31:42.503113 6641 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1006 21:31:42.503136 6641 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1006 21:31:42.503181 6641 factory.go:656] Stopping watch factory\\\\nI1006 21:31:42.503201 6641 ovnkube.go:599] Stopped ovnkube\\\\nI1006 21:31:42.503243 6641 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1006 21:31:42.503270 6641 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1006 21:31:42.503284 6641 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1006 21:31:42.503301 6641 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1006 21:31:42.503315 6641 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1006 21:31:42.503329 6641 handler.go:208] Removed *v1.Node event handler 2\\\\nI1006 21:31:42.503343 6641 handler.go:208] Removed *v1.Node event handler 7\\\\nI1006 21:31:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.076433 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.079653 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.092332 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.092408 5014 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.092432 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.092462 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.092485 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:44Z","lastTransitionTime":"2025-10-06T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.094490 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.110472 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.151164 5014 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.162939 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.176426 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.202310 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.202543 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.202723 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.202739 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.202760 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.202780 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:44Z","lastTransitionTime":"2025-10-06T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.222640 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.238500 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.271496 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.295454 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.305766 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.305836 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.305859 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.305894 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.305919 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:44Z","lastTransitionTime":"2025-10-06T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.314954 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.330833 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.352654 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.373875 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.392961 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.409328 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.409397 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.409352 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.409422 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.409690 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.409720 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:44Z","lastTransitionTime":"2025-10-06T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.430837 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.454296 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.472295 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.1
68.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.484218 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.484269 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.484221 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:44 crc kubenswrapper[5014]: E1006 21:31:44.484459 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:44 crc kubenswrapper[5014]: E1006 21:31:44.484604 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:44 crc kubenswrapper[5014]: E1006 21:31:44.484780 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.488500 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.512978 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 
21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.513033 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.513051 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.513075 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.513096 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:44Z","lastTransitionTime":"2025-10-06T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.525707 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runnin
g\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f412
97b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.545770 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.564606 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.581336 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.604554 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.615732 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.615800 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.615817 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.615843 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.615863 5014 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:44Z","lastTransitionTime":"2025-10-06T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.626754 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.646285 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9a0d7e7-de40-4e11-933d-563f96444a66\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1a3264bc2aabb1a0da4bd54eae121a419a29be7608fbd1811766ff00c8e123b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81f643a744f6ab40dd2ab7288d53704dabfc29d25088545333b7475bcabfeb79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a852a360b8208306c0db26edde70be0c758128fca997ba3a19cf6a3ceaf31240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001
edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.666306 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.682861 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.710491 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b9
79bfd274c73161d70e9f6f46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:42Z\\\",\\\"message\\\":\\\"6 21:31:42.503008 6641 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1006 21:31:42.503035 6641 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1006 21:31:42.503045 6641 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1006 21:31:42.503080 6641 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1006 21:31:42.503103 6641 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1006 21:31:42.503113 6641 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1006 21:31:42.503136 6641 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1006 21:31:42.503181 6641 factory.go:656] Stopping watch factory\\\\nI1006 21:31:42.503201 6641 ovnkube.go:599] Stopped ovnkube\\\\nI1006 21:31:42.503243 6641 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1006 21:31:42.503270 6641 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1006 21:31:42.503284 6641 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1006 21:31:42.503301 6641 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1006 21:31:42.503315 6641 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1006 21:31:42.503329 6641 handler.go:208] Removed *v1.Node event handler 2\\\\nI1006 21:31:42.503343 6641 handler.go:208] Removed *v1.Node event handler 7\\\\nI1006 21:31:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:44Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.718356 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.718396 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.718411 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.718435 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.718450 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:44Z","lastTransitionTime":"2025-10-06T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.821904 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.821969 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.821987 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.822011 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.822029 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:44Z","lastTransitionTime":"2025-10-06T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.924599 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.924676 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.924695 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.924719 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:44 crc kubenswrapper[5014]: I1006 21:31:44.924737 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:44Z","lastTransitionTime":"2025-10-06T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.027584 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.027682 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.027700 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.027725 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.027743 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:45Z","lastTransitionTime":"2025-10-06T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.130868 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.130929 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.130947 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.130972 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.130993 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:45Z","lastTransitionTime":"2025-10-06T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.233970 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.234066 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.234085 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.234111 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.234131 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:45Z","lastTransitionTime":"2025-10-06T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.356532 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.356589 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.356606 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.356663 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.356681 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:45Z","lastTransitionTime":"2025-10-06T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.459515 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.459589 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.459607 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.459723 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.459744 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:45Z","lastTransitionTime":"2025-10-06T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.484691 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:45 crc kubenswrapper[5014]: E1006 21:31:45.485008 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.509222 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.523397 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.540162 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.562166 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.562877 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.562924 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.562943 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.562969 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.562987 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:45Z","lastTransitionTime":"2025-10-06T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.579790 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":
\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.605762 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.625453 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.643288 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.660328 5014 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.665481 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.665533 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.665551 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.665577 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.665594 5014 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:45Z","lastTransitionTime":"2025-10-06T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.694741 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd
6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.715032 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.734259 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.755658 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.767786 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.767842 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.767859 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.767884 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.767900 5014 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:45Z","lastTransitionTime":"2025-10-06T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.776706 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.808720 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"
volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"ru
n-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:42Z\\\",\\\"message\\\":\\\"6 21:31:42.503008 6641 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1006 21:31:42.503035 6641 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1006 21:31:42.503045 6641 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1006 21:31:42.503080 6641 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1006 21:31:42.503103 6641 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1006 21:31:42.503113 6641 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1006 21:31:42.503136 6641 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1006 21:31:42.503181 6641 factory.go:656] Stopping watch factory\\\\nI1006 21:31:42.503201 6641 ovnkube.go:599] Stopped ovnkube\\\\nI1006 21:31:42.503243 6641 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1006 21:31:42.503270 6641 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1006 21:31:42.503284 6641 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1006 21:31:42.503301 6641 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1006 21:31:42.503315 6641 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1006 21:31:42.503329 6641 handler.go:208] Removed *v1.Node 
event handler 2\\\\nI1006 21:31:42.503343 6641 handler.go:208] Removed *v1.Node event handler 7\\\\nI1006 21:31:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/k
ubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.826970 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9a0d7e7-de40-4e11-933d-563f96444a66\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1a3264bc2aabb1a0da4bd54eae121a419a29be7608fbd1811766ff00c8e123b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81f643a744f6ab40dd2ab7288d53704dabfc29d25088545333b7475bcabfeb79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a852a360b8208306c0db26edde70be0c758128fca997ba3a19cf6a3ceaf31240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.844752 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.863489 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:45Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.870403 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.870485 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.870505 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.870528 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.870545 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:45Z","lastTransitionTime":"2025-10-06T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.973550 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.973680 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.973707 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.973735 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:45 crc kubenswrapper[5014]: I1006 21:31:45.973758 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:45Z","lastTransitionTime":"2025-10-06T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.070759 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs\") pod \"network-metrics-daemon-chcf6\" (UID: \"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\") " pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.070976 5014 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.071044 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs podName:e4dbffab-5f6a-4ba5-b0c3-68e7e8840621 nodeName:}" failed. No retries permitted until 2025-10-06 21:32:02.071021114 +0000 UTC m=+67.364057888 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs") pod "network-metrics-daemon-chcf6" (UID: "e4dbffab-5f6a-4ba5-b0c3-68e7e8840621") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.077419 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.077536 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.077680 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.077718 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.077772 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:46Z","lastTransitionTime":"2025-10-06T21:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.181031 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.181099 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.181123 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.181152 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.181170 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:46Z","lastTransitionTime":"2025-10-06T21:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.284000 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.284050 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.284066 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.284090 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.284122 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:46Z","lastTransitionTime":"2025-10-06T21:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.374238 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.374499 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:32:18.374455997 +0000 UTC m=+83.667492771 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.387464 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.387525 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.387543 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.387565 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.387583 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:46Z","lastTransitionTime":"2025-10-06T21:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.475543 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.475674 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.475737 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.475790 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.475890 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.475921 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.475958 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.475970 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.475991 5014 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.475996 5014 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.476097 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-06 21:32:18.476062714 +0000 UTC m=+83.769099488 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.476167 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-06 21:32:18.476140226 +0000 UTC m=+83.769177000 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.475960 5014 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.476223 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-06 21:32:18.476209289 +0000 UTC m=+83.769246063 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.476331 5014 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.476396 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-06 21:32:18.476380575 +0000 UTC m=+83.769417349 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.484124 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.484270 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.484352 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.484448 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.484481 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:46 crc kubenswrapper[5014]: E1006 21:31:46.484586 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.491294 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.491370 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.491390 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.491418 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.491438 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:46Z","lastTransitionTime":"2025-10-06T21:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.595087 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.595166 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.595191 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.595219 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.595242 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:46Z","lastTransitionTime":"2025-10-06T21:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.699173 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.699240 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.699258 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.699285 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.699303 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:46Z","lastTransitionTime":"2025-10-06T21:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.802825 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.802918 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.802967 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.802992 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.803009 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:46Z","lastTransitionTime":"2025-10-06T21:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.905218 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.905283 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.905301 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.905323 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:46 crc kubenswrapper[5014]: I1006 21:31:46.905340 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:46Z","lastTransitionTime":"2025-10-06T21:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.007768 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.007823 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.007842 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.007867 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.007885 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:47Z","lastTransitionTime":"2025-10-06T21:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.110806 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.110881 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.110900 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.110929 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.110952 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:47Z","lastTransitionTime":"2025-10-06T21:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.214573 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.214685 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.214711 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.214812 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.214849 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:47Z","lastTransitionTime":"2025-10-06T21:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.317794 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.317861 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.317874 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.317897 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.317916 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:47Z","lastTransitionTime":"2025-10-06T21:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.420307 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.420393 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.420412 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.420444 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.420467 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:47Z","lastTransitionTime":"2025-10-06T21:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.484456 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:47 crc kubenswrapper[5014]: E1006 21:31:47.485003 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.523363 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.523436 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.523460 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.523482 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.523500 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:47Z","lastTransitionTime":"2025-10-06T21:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.625760 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.625831 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.625849 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.625875 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.625890 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:47Z","lastTransitionTime":"2025-10-06T21:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.728366 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.728395 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.728403 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.728415 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.728424 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:47Z","lastTransitionTime":"2025-10-06T21:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.831444 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.831516 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.831532 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.831555 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.831572 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:47Z","lastTransitionTime":"2025-10-06T21:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.935078 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.935163 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.935188 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.935222 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:47 crc kubenswrapper[5014]: I1006 21:31:47.935247 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:47Z","lastTransitionTime":"2025-10-06T21:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.037880 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.037961 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.037985 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.038015 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.038037 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:48Z","lastTransitionTime":"2025-10-06T21:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.141956 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.142024 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.142049 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.142077 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.142099 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:48Z","lastTransitionTime":"2025-10-06T21:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.245410 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.245479 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.245502 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.245532 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.245550 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:48Z","lastTransitionTime":"2025-10-06T21:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.348661 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.348723 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.348740 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.348771 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.348790 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:48Z","lastTransitionTime":"2025-10-06T21:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.452249 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.452325 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.452349 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.452377 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.452439 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:48Z","lastTransitionTime":"2025-10-06T21:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.484031 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.484082 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.484092 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:48 crc kubenswrapper[5014]: E1006 21:31:48.484445 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:48 crc kubenswrapper[5014]: E1006 21:31:48.484539 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:48 crc kubenswrapper[5014]: E1006 21:31:48.484693 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.556203 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.556260 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.556277 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.556300 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.556317 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:48Z","lastTransitionTime":"2025-10-06T21:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.659366 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.659429 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.659448 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.659474 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.659493 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:48Z","lastTransitionTime":"2025-10-06T21:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.762713 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.762775 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.762793 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.762820 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.762839 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:48Z","lastTransitionTime":"2025-10-06T21:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.865692 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.865743 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.865762 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.865784 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.865801 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:48Z","lastTransitionTime":"2025-10-06T21:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.968780 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.968853 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.968878 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.968909 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:48 crc kubenswrapper[5014]: I1006 21:31:48.968932 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:48Z","lastTransitionTime":"2025-10-06T21:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.071983 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.072043 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.072066 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.072121 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.072147 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:49Z","lastTransitionTime":"2025-10-06T21:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.175043 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.175100 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.175122 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.175152 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.175178 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:49Z","lastTransitionTime":"2025-10-06T21:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.278434 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.278486 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.278503 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.278525 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.278546 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:49Z","lastTransitionTime":"2025-10-06T21:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.381856 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.381936 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.381954 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.381980 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.382003 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:49Z","lastTransitionTime":"2025-10-06T21:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.484062 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:49 crc kubenswrapper[5014]: E1006 21:31:49.484260 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.486108 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.486167 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.486188 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.486216 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.486238 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:49Z","lastTransitionTime":"2025-10-06T21:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.590774 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.590826 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.590842 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.590866 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.590883 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:49Z","lastTransitionTime":"2025-10-06T21:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.693861 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.693911 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.693929 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.693950 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.693968 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:49Z","lastTransitionTime":"2025-10-06T21:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.797804 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.797868 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.797890 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.797921 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.797942 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:49Z","lastTransitionTime":"2025-10-06T21:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.900873 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.900959 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.900978 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.901014 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:49 crc kubenswrapper[5014]: I1006 21:31:49.901035 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:49Z","lastTransitionTime":"2025-10-06T21:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.003891 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.003953 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.003973 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.003997 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.004016 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:50Z","lastTransitionTime":"2025-10-06T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.106900 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.106961 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.106979 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.107006 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.107023 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:50Z","lastTransitionTime":"2025-10-06T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.210429 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.210496 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.210513 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.210538 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.210559 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:50Z","lastTransitionTime":"2025-10-06T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.313982 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.314051 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.314072 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.314098 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.314118 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:50Z","lastTransitionTime":"2025-10-06T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.417550 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.417615 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.417677 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.417700 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.417718 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:50Z","lastTransitionTime":"2025-10-06T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.483972 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:50 crc kubenswrapper[5014]: E1006 21:31:50.484709 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.484946 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.484946 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:50 crc kubenswrapper[5014]: E1006 21:31:50.485171 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:50 crc kubenswrapper[5014]: E1006 21:31:50.485350 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.521292 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.521368 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.521393 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.521425 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.521456 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:50Z","lastTransitionTime":"2025-10-06T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.623947 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.623989 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.624007 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.624030 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.624052 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:50Z","lastTransitionTime":"2025-10-06T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.726769 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.726832 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.726849 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.726874 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.726891 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:50Z","lastTransitionTime":"2025-10-06T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.830092 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.830151 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.830167 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.830192 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.830210 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:50Z","lastTransitionTime":"2025-10-06T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.933501 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.933561 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.933799 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.934050 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:50 crc kubenswrapper[5014]: I1006 21:31:50.934070 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:50Z","lastTransitionTime":"2025-10-06T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.037172 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.037226 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.037251 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.037277 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.037296 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:51Z","lastTransitionTime":"2025-10-06T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.140760 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.140841 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.140866 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.140898 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.140921 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:51Z","lastTransitionTime":"2025-10-06T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.244929 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.244980 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.244996 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.245019 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.245035 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:51Z","lastTransitionTime":"2025-10-06T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.347708 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.348042 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.348192 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.348332 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.348468 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:51Z","lastTransitionTime":"2025-10-06T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.452255 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.453047 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.453206 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.453344 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.453469 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:51Z","lastTransitionTime":"2025-10-06T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.483979 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:51 crc kubenswrapper[5014]: E1006 21:31:51.484167 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.556311 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.556684 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.556847 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.556997 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.557140 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:51Z","lastTransitionTime":"2025-10-06T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.560722 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.560947 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.561134 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.561357 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.561547 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:51Z","lastTransitionTime":"2025-10-06T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:51 crc kubenswrapper[5014]: E1006 21:31:51.583303 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:51Z is after 
2025-08-24T17:21:41Z" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.589157 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.589356 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.589500 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.589680 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.589834 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:51Z","lastTransitionTime":"2025-10-06T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:51 crc kubenswrapper[5014]: E1006 21:31:51.610495 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:51Z is after 
2025-08-24T17:21:41Z" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.616091 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.616155 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.616174 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.616200 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.616217 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:51Z","lastTransitionTime":"2025-10-06T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:51 crc kubenswrapper[5014]: E1006 21:31:51.638913 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
2025-08-24T17:21:41Z" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.644855 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.644927 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.644950 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.644985 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.645007 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:51Z","lastTransitionTime":"2025-10-06T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:51 crc kubenswrapper[5014]: E1006 21:31:51.666168 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
2025-08-24T17:21:41Z" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.671578 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.671663 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.671681 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.671704 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.671723 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:51Z","lastTransitionTime":"2025-10-06T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:51 crc kubenswrapper[5014]: E1006 21:31:51.692264 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
2025-08-24T17:21:41Z" Oct 06 21:31:51 crc kubenswrapper[5014]: E1006 21:31:51.692485 5014 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.694491 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.694536 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.694552 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.694571 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.694587 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:51Z","lastTransitionTime":"2025-10-06T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.797108 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.797166 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.797183 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.797206 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.797224 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:51Z","lastTransitionTime":"2025-10-06T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.900604 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.900677 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.900696 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.900720 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:51 crc kubenswrapper[5014]: I1006 21:31:51.900739 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:51Z","lastTransitionTime":"2025-10-06T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.002487 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.002524 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.002535 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.002551 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.002563 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:52Z","lastTransitionTime":"2025-10-06T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.105068 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.105137 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.105162 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.105186 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.105203 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:52Z","lastTransitionTime":"2025-10-06T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.207398 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.207779 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.207883 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.207990 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.208086 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:52Z","lastTransitionTime":"2025-10-06T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.311782 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.312118 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.312261 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.312373 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.312473 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:52Z","lastTransitionTime":"2025-10-06T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.416080 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.416446 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.416587 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.416764 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.416897 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:52Z","lastTransitionTime":"2025-10-06T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.484133 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:52 crc kubenswrapper[5014]: E1006 21:31:52.484317 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.484588 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:52 crc kubenswrapper[5014]: E1006 21:31:52.484718 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.484169 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:52 crc kubenswrapper[5014]: E1006 21:31:52.487169 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.520261 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.520322 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.520341 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.520365 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.520384 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:52Z","lastTransitionTime":"2025-10-06T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.624073 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.624123 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.624134 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.624151 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.624164 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:52Z","lastTransitionTime":"2025-10-06T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.727763 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.727845 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.727865 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.727892 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.727910 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:52Z","lastTransitionTime":"2025-10-06T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.830310 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.830380 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.830405 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.830434 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.830459 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:52Z","lastTransitionTime":"2025-10-06T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.933932 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.933997 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.934015 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.934041 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:52 crc kubenswrapper[5014]: I1006 21:31:52.934060 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:52Z","lastTransitionTime":"2025-10-06T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.036934 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.036976 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.037011 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.037028 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.037041 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:53Z","lastTransitionTime":"2025-10-06T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.139478 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.139546 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.139563 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.139589 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.139607 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:53Z","lastTransitionTime":"2025-10-06T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.242912 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.242975 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.242991 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.243017 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.243035 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:53Z","lastTransitionTime":"2025-10-06T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.345758 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.345954 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.345975 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.346000 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.346018 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:53Z","lastTransitionTime":"2025-10-06T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.449475 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.449540 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.449557 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.449582 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.449602 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:53Z","lastTransitionTime":"2025-10-06T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.484043 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:53 crc kubenswrapper[5014]: E1006 21:31:53.484216 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.553020 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.553066 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.553084 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.553107 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.553124 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:53Z","lastTransitionTime":"2025-10-06T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.655930 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.655998 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.656040 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.656074 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.656096 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:53Z","lastTransitionTime":"2025-10-06T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.758596 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.758703 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.758727 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.758758 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.758778 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:53Z","lastTransitionTime":"2025-10-06T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.862149 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.862197 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.862213 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.862236 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.862253 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:53Z","lastTransitionTime":"2025-10-06T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.964881 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.964931 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.964947 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.964970 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:53 crc kubenswrapper[5014]: I1006 21:31:53.964987 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:53Z","lastTransitionTime":"2025-10-06T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.067425 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.067515 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.067544 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.067576 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.067595 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:54Z","lastTransitionTime":"2025-10-06T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.170849 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.171243 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.171407 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.171558 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.171726 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:54Z","lastTransitionTime":"2025-10-06T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.275107 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.275470 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.275712 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.275958 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.276160 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:54Z","lastTransitionTime":"2025-10-06T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.379672 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.380031 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.380220 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.380414 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.380605 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:54Z","lastTransitionTime":"2025-10-06T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.483455 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:54 crc kubenswrapper[5014]: E1006 21:31:54.483682 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.483815 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:54 crc kubenswrapper[5014]: E1006 21:31:54.483917 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.483980 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:54 crc kubenswrapper[5014]: E1006 21:31:54.484056 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.484529 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.484568 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.484584 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.484604 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.484649 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:54Z","lastTransitionTime":"2025-10-06T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.588143 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.588202 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.588223 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.588248 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.588265 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:54Z","lastTransitionTime":"2025-10-06T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.691221 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.691285 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.691302 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.691327 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.691344 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:54Z","lastTransitionTime":"2025-10-06T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.794547 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.794609 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.794656 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.794682 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.794699 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:54Z","lastTransitionTime":"2025-10-06T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.897912 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.897960 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.897978 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.898001 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:54 crc kubenswrapper[5014]: I1006 21:31:54.898022 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:54Z","lastTransitionTime":"2025-10-06T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.001786 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.001840 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.001857 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.001882 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.001899 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:55Z","lastTransitionTime":"2025-10-06T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.104663 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.104730 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.104752 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.104785 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.104808 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:55Z","lastTransitionTime":"2025-10-06T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.208302 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.208380 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.208404 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.208434 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.208453 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:55Z","lastTransitionTime":"2025-10-06T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.312047 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.312107 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.312126 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.312153 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.312175 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:55Z","lastTransitionTime":"2025-10-06T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.415179 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.415243 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.415261 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.415284 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.415302 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:55Z","lastTransitionTime":"2025-10-06T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.484210 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:55 crc kubenswrapper[5014]: E1006 21:31:55.484712 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.486069 5014 scope.go:117] "RemoveContainer" containerID="a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46" Oct 06 21:31:55 crc kubenswrapper[5014]: E1006 21:31:55.486329 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.510871 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.518264 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.518330 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.518356 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.518462 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.518490 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:55Z","lastTransitionTime":"2025-10-06T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.525527 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.538656 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.552067 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.572678 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.590770 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podI
P\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.608422 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.621778 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.621850 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.621862 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.621881 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.621918 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:55Z","lastTransitionTime":"2025-10-06T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.624917 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.640663 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.653798 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.678337 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.695464 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.713760 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\
\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] 
issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.724054 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.724099 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.724111 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.724127 
5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.724140 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:55Z","lastTransitionTime":"2025-10-06T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.729683 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.748663 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.773077 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/o
vnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:42Z\\\",\\\"message\\\":\\\"6 21:31:42.503008 6641 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1006 21:31:42.503035 6641 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1006 21:31:42.503045 6641 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1006 21:31:42.503080 6641 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1006 21:31:42.503103 6641 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1006 21:31:42.503113 6641 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1006 21:31:42.503136 6641 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1006 21:31:42.503181 6641 factory.go:656] Stopping watch factory\\\\nI1006 21:31:42.503201 6641 ovnkube.go:599] Stopped ovnkube\\\\nI1006 21:31:42.503243 6641 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1006 21:31:42.503270 6641 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1006 21:31:42.503284 6641 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1006 21:31:42.503301 6641 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1006 21:31:42.503315 6641 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1006 21:31:42.503329 6641 handler.go:208] Removed *v1.Node event handler 2\\\\nI1006 21:31:42.503343 
6641 handler.go:208] Removed *v1.Node event handler 7\\\\nI1006 21:31:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\
\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.789203 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9a0d7e7-de40-4e11-933d-563f96444a66\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1a3264bc2aabb1a0da4bd54eae121a419a29be7608fbd1811766ff00c8e123b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81f643a744f6ab40dd2ab7288d53704dabfc29d25088545333b7475bcabfeb79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a852a360b8208306c0db26edde70be0c758128fca997ba3a19cf6a3ceaf31240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.809159 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:31:55Z is after 2025-08-24T17:21:41Z" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.826862 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.826910 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.826931 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.826961 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.826983 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:55Z","lastTransitionTime":"2025-10-06T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.929707 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.929755 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.929768 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.929786 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:55 crc kubenswrapper[5014]: I1006 21:31:55.929800 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:55Z","lastTransitionTime":"2025-10-06T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.033197 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.033523 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.033754 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.033991 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.034216 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:56Z","lastTransitionTime":"2025-10-06T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.137148 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.137849 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.137912 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.137958 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.137983 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:56Z","lastTransitionTime":"2025-10-06T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.241428 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.241486 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.241503 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.241528 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.241546 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:56Z","lastTransitionTime":"2025-10-06T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.344613 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.344703 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.344720 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.344748 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.344766 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:56Z","lastTransitionTime":"2025-10-06T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.448186 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.448253 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.448271 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.448296 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.448312 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:56Z","lastTransitionTime":"2025-10-06T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.483805 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.483995 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.483825 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:56 crc kubenswrapper[5014]: E1006 21:31:56.484236 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:56 crc kubenswrapper[5014]: E1006 21:31:56.484740 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:56 crc kubenswrapper[5014]: E1006 21:31:56.484418 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.551593 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.551702 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.551727 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.551755 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.551777 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:56Z","lastTransitionTime":"2025-10-06T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.655046 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.655099 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.655116 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.655141 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.655159 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:56Z","lastTransitionTime":"2025-10-06T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.757909 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.757977 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.757996 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.758021 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.758041 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:56Z","lastTransitionTime":"2025-10-06T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.860700 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.860766 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.860790 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.860816 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.860834 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:56Z","lastTransitionTime":"2025-10-06T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.964531 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.964615 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.964672 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.964697 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:56 crc kubenswrapper[5014]: I1006 21:31:56.964715 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:56Z","lastTransitionTime":"2025-10-06T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.073721 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.073803 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.073830 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.073863 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.073896 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:57Z","lastTransitionTime":"2025-10-06T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.176740 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.176783 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.176804 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.176829 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.176898 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:57Z","lastTransitionTime":"2025-10-06T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.279873 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.279970 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.279989 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.280013 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.280032 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:57Z","lastTransitionTime":"2025-10-06T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.383000 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.383059 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.383078 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.383102 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.383119 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:57Z","lastTransitionTime":"2025-10-06T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.484686 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:57 crc kubenswrapper[5014]: E1006 21:31:57.484972 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.486242 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.486306 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.486325 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.486348 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.486365 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:57Z","lastTransitionTime":"2025-10-06T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.590360 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.590427 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.590446 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.590471 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.590491 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:57Z","lastTransitionTime":"2025-10-06T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.694720 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.694788 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.694806 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.694831 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.694849 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:57Z","lastTransitionTime":"2025-10-06T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.798952 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.799049 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.799066 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.799095 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.799111 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:57Z","lastTransitionTime":"2025-10-06T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.902854 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.902934 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.902952 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.902985 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:57 crc kubenswrapper[5014]: I1006 21:31:57.903006 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:57Z","lastTransitionTime":"2025-10-06T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.006529 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.006654 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.006678 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.006708 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.006730 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:58Z","lastTransitionTime":"2025-10-06T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.109475 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.109529 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.109546 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.109568 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.109585 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:58Z","lastTransitionTime":"2025-10-06T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.212982 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.213044 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.213066 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.213092 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.213117 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:58Z","lastTransitionTime":"2025-10-06T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.316855 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.316906 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.316918 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.316938 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.316953 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:58Z","lastTransitionTime":"2025-10-06T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.421313 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.421397 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.421424 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.421454 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.421474 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:58Z","lastTransitionTime":"2025-10-06T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.484201 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.484260 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.484262 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:31:58 crc kubenswrapper[5014]: E1006 21:31:58.484444 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:31:58 crc kubenswrapper[5014]: E1006 21:31:58.484550 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:31:58 crc kubenswrapper[5014]: E1006 21:31:58.484742 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.526186 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.526256 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.526283 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.526327 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.526356 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:58Z","lastTransitionTime":"2025-10-06T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.630096 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.630158 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.630181 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.630209 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.630228 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:58Z","lastTransitionTime":"2025-10-06T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.733992 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.734046 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.734058 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.734082 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.734100 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:58Z","lastTransitionTime":"2025-10-06T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.837663 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.837741 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.837765 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.837800 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.837825 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:58Z","lastTransitionTime":"2025-10-06T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.941741 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.941817 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.941832 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.941859 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:58 crc kubenswrapper[5014]: I1006 21:31:58.941873 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:58Z","lastTransitionTime":"2025-10-06T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.045704 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.045758 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.045775 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.045800 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.045819 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:59Z","lastTransitionTime":"2025-10-06T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.149149 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.149220 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.149239 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.149270 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.149296 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:59Z","lastTransitionTime":"2025-10-06T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.253157 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.253290 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.253313 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.253341 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.253363 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:59Z","lastTransitionTime":"2025-10-06T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.356049 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.356094 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.356104 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.356121 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.356172 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:59Z","lastTransitionTime":"2025-10-06T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.459683 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.459805 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.459828 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.459859 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.459881 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:59Z","lastTransitionTime":"2025-10-06T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.484186 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:31:59 crc kubenswrapper[5014]: E1006 21:31:59.484382 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.562730 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.562828 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.562846 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.562869 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.562885 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:59Z","lastTransitionTime":"2025-10-06T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.666985 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.667055 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.667076 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.667101 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.667134 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:59Z","lastTransitionTime":"2025-10-06T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.771041 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.771167 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.771185 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.771210 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.771227 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:59Z","lastTransitionTime":"2025-10-06T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.874163 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.874238 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.874260 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.874289 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.874310 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:59Z","lastTransitionTime":"2025-10-06T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.977273 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.977342 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.977363 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.977392 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:31:59 crc kubenswrapper[5014]: I1006 21:31:59.977410 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:31:59Z","lastTransitionTime":"2025-10-06T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.080851 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.080919 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.080940 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.080967 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.080987 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:00Z","lastTransitionTime":"2025-10-06T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.184924 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.184976 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.184987 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.185007 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.185022 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:00Z","lastTransitionTime":"2025-10-06T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.287244 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.287294 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.287305 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.287322 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.287333 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:00Z","lastTransitionTime":"2025-10-06T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.390475 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.390555 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.390573 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.390610 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.390669 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:00Z","lastTransitionTime":"2025-10-06T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.483868 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.483891 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:00 crc kubenswrapper[5014]: E1006 21:32:00.484101 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.483899 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:00 crc kubenswrapper[5014]: E1006 21:32:00.484208 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:00 crc kubenswrapper[5014]: E1006 21:32:00.484393 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.493878 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.493935 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.493958 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.493987 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.494012 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:00Z","lastTransitionTime":"2025-10-06T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.596157 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.596196 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.596214 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.596241 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.596281 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:00Z","lastTransitionTime":"2025-10-06T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.699305 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.699354 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.699363 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.699378 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.699388 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:00Z","lastTransitionTime":"2025-10-06T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.802087 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.802202 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.802221 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.802253 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.802274 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:00Z","lastTransitionTime":"2025-10-06T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.905096 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.905133 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.905142 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.905159 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:00 crc kubenswrapper[5014]: I1006 21:32:00.905172 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:00Z","lastTransitionTime":"2025-10-06T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.007443 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.007483 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.007494 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.007511 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.007522 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:01Z","lastTransitionTime":"2025-10-06T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.110095 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.110185 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.110200 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.110218 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.110231 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:01Z","lastTransitionTime":"2025-10-06T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.213916 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.214068 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.214135 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.214185 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.214212 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:01Z","lastTransitionTime":"2025-10-06T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.316743 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.316785 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.316811 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.316825 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.316834 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:01Z","lastTransitionTime":"2025-10-06T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.419532 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.419564 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.419574 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.419587 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.419596 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:01Z","lastTransitionTime":"2025-10-06T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.483835 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:01 crc kubenswrapper[5014]: E1006 21:32:01.484045 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.522207 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.522258 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.522276 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.522297 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.522317 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:01Z","lastTransitionTime":"2025-10-06T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.624877 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.624942 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.624958 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.624981 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.624998 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:01Z","lastTransitionTime":"2025-10-06T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.727777 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.727844 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.727866 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.727893 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.727915 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:01Z","lastTransitionTime":"2025-10-06T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.830770 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.830830 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.830846 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.830874 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.830891 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:01Z","lastTransitionTime":"2025-10-06T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.922201 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.922569 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.922761 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.922916 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.923041 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:01Z","lastTransitionTime":"2025-10-06T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:01 crc kubenswrapper[5014]: E1006 21:32:01.938442 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:01Z is after 2025-08-24T17:21:41Z"
Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.943046 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.943110 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.943129 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.943154 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.943174 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:01Z","lastTransitionTime":"2025-10-06T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:01 crc kubenswrapper[5014]: E1006 21:32:01.962125 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:01Z is after 2025-08-24T17:21:41Z"
Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.966172 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.966247 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.966270 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.966298 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.966319 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:01Z","lastTransitionTime":"2025-10-06T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:01 crc kubenswrapper[5014]: E1006 21:32:01.989106 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[…],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:01Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.993772 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.993827 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.993844 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.993868 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:01 crc kubenswrapper[5014]: I1006 21:32:01.993885 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:01Z","lastTransitionTime":"2025-10-06T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:02 crc kubenswrapper[5014]: E1006 21:32:02.013242 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[…],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:02Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.016966 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.017020 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.017040 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.017064 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.017082 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:02Z","lastTransitionTime":"2025-10-06T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:02 crc kubenswrapper[5014]: E1006 21:32:02.036234 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[…],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:02Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:02 crc kubenswrapper[5014]: E1006 21:32:02.036473 5014 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.037838 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasSufficientMemory" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.037883 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.037902 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.037922 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.037936 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:02Z","lastTransitionTime":"2025-10-06T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.140082 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.140163 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.140175 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.140189 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.140197 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:02Z","lastTransitionTime":"2025-10-06T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.156590 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs\") pod \"network-metrics-daemon-chcf6\" (UID: \"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\") " pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:02 crc kubenswrapper[5014]: E1006 21:32:02.156725 5014 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:32:02 crc kubenswrapper[5014]: E1006 21:32:02.156780 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs podName:e4dbffab-5f6a-4ba5-b0c3-68e7e8840621 nodeName:}" failed. No retries permitted until 2025-10-06 21:32:34.156762736 +0000 UTC m=+99.449799470 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs") pod "network-metrics-daemon-chcf6" (UID: "e4dbffab-5f6a-4ba5-b0c3-68e7e8840621") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.242644 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.242756 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.242773 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.242794 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.242812 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:02Z","lastTransitionTime":"2025-10-06T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.345669 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.345923 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.345945 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.345975 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.346000 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:02Z","lastTransitionTime":"2025-10-06T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.448948 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.448997 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.449019 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.449047 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.449069 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:02Z","lastTransitionTime":"2025-10-06T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.483895 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.483942 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:02 crc kubenswrapper[5014]: E1006 21:32:02.483998 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:02 crc kubenswrapper[5014]: E1006 21:32:02.484090 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.484105 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:02 crc kubenswrapper[5014]: E1006 21:32:02.484420 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.552109 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.552155 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.552165 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.552180 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.552191 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:02Z","lastTransitionTime":"2025-10-06T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.654489 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.654523 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.654532 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.654547 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.654557 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:02Z","lastTransitionTime":"2025-10-06T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.757267 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.757325 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.757348 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.757379 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.757400 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:02Z","lastTransitionTime":"2025-10-06T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.859558 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.859600 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.859608 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.859643 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.859653 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:02Z","lastTransitionTime":"2025-10-06T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.961755 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.961808 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.961825 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.961846 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:02 crc kubenswrapper[5014]: I1006 21:32:02.961862 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:02Z","lastTransitionTime":"2025-10-06T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.063764 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.063823 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.063846 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.063876 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.063897 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:03Z","lastTransitionTime":"2025-10-06T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.166589 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.166662 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.166678 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.166699 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.166719 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:03Z","lastTransitionTime":"2025-10-06T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.269241 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.269267 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.269275 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.269285 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.269293 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:03Z","lastTransitionTime":"2025-10-06T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.373208 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.373587 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.373605 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.373658 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.373675 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:03Z","lastTransitionTime":"2025-10-06T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.476286 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.476334 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.476352 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.476373 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.476391 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:03Z","lastTransitionTime":"2025-10-06T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.483733 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:03 crc kubenswrapper[5014]: E1006 21:32:03.483894 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.578706 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.578769 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.578787 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.578810 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.578827 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:03Z","lastTransitionTime":"2025-10-06T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.681659 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.681696 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.681730 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.681743 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.681751 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:03Z","lastTransitionTime":"2025-10-06T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.784503 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.784555 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.784577 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.784599 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.784668 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:03Z","lastTransitionTime":"2025-10-06T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.887374 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.887428 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.887445 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.887470 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.887489 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:03Z","lastTransitionTime":"2025-10-06T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.990025 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.990103 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.990129 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.990154 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:03 crc kubenswrapper[5014]: I1006 21:32:03.990176 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:03Z","lastTransitionTime":"2025-10-06T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.013669 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8ddbf_9f1464a5-d713-4f79-8248-33c69abcdac2/kube-multus/0.log" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.013722 5014 generic.go:334] "Generic (PLEG): container finished" podID="9f1464a5-d713-4f79-8248-33c69abcdac2" containerID="7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4" exitCode=1 Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.013756 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8ddbf" event={"ID":"9f1464a5-d713-4f79-8248-33c69abcdac2","Type":"ContainerDied","Data":"7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4"} Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.014106 5014 scope.go:117] "RemoveContainer" containerID="7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.028563 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9a0d7e7-de40-4e11-933d-563f96444a66\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1a3264bc2aabb1a0da4bd54eae121a419a29be7608fbd1811766ff00c8e123b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81f643a744f6ab40dd2ab7288d53704dabfc29d25088545333b7475bcabfeb79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-
pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a852a360b8208306c0db26edde70be0c758128fca997ba3a19cf6a3ceaf31240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.044812 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.058729 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.077817 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:42Z\\\",\\\"message\\\":\\\"6 21:31:42.503008 6641 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1006 21:31:42.503035 6641 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1006 21:31:42.503045 6641 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1006 21:31:42.503080 6641 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1006 21:31:42.503103 6641 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1006 21:31:42.503113 6641 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1006 21:31:42.503136 6641 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1006 21:31:42.503181 6641 factory.go:656] Stopping watch factory\\\\nI1006 21:31:42.503201 6641 ovnkube.go:599] Stopped ovnkube\\\\nI1006 21:31:42.503243 6641 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1006 21:31:42.503270 6641 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1006 21:31:42.503284 6641 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1006 21:31:42.503301 6641 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1006 21:31:42.503315 6641 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1006 21:31:42.503329 6641 handler.go:208] Removed *v1.Node event handler 2\\\\nI1006 21:31:42.503343 6641 handler.go:208] Removed *v1.Node event handler 7\\\\nI1006 21:31:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.091574 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.093012 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.093053 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.093064 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.093080 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.093089 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:04Z","lastTransitionTime":"2025-10-06T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.103192 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.114070 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.124391 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.132484 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.143935 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:32:03Z\\\",\\\"message\\\":\\\"2025-10-06T21:31:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027\\\\n2025-10-06T21:31:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027 to /host/opt/cni/bin/\\\\n2025-10-06T21:31:18Z [verbose] multus-daemon started\\\\n2025-10-06T21:31:18Z [verbose] Readiness Indicator file check\\\\n2025-10-06T21:32:03Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.157894 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.168035 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.183961 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.196019 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.196798 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.196829 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.196841 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.196862 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.196876 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:04Z","lastTransitionTime":"2025-10-06T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.208197 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.216587 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.228319 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.237474 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:04Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.299478 5014 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.299517 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.299531 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.299548 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.299559 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:04Z","lastTransitionTime":"2025-10-06T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.401994 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.402049 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.402068 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.402092 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.402111 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:04Z","lastTransitionTime":"2025-10-06T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.483417 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:04 crc kubenswrapper[5014]: E1006 21:32:04.483582 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.483816 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:04 crc kubenswrapper[5014]: E1006 21:32:04.483896 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.484054 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:04 crc kubenswrapper[5014]: E1006 21:32:04.484243 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.505324 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.505376 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.505393 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.505416 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.505434 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:04Z","lastTransitionTime":"2025-10-06T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.607809 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.607877 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.607896 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.607920 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.607937 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:04Z","lastTransitionTime":"2025-10-06T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.710915 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.710972 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.710989 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.711014 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.711030 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:04Z","lastTransitionTime":"2025-10-06T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.813179 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.813219 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.813230 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.813247 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.813258 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:04Z","lastTransitionTime":"2025-10-06T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.915320 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.915360 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.915369 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.915383 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:04 crc kubenswrapper[5014]: I1006 21:32:04.915394 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:04Z","lastTransitionTime":"2025-10-06T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.018437 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.018488 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.018510 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.018538 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.018561 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:05Z","lastTransitionTime":"2025-10-06T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.020078 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8ddbf_9f1464a5-d713-4f79-8248-33c69abcdac2/kube-multus/0.log" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.020192 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8ddbf" event={"ID":"9f1464a5-d713-4f79-8248-33c69abcdac2","Type":"ContainerStarted","Data":"c0cf1ba6616443cb4de3d3036bcacb1b1672bcd98fc50e2162c40b5fdfcb7583"} Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.038328 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.053141 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.082976 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.102080 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.119776 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\
\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] 
issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.121693 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.121766 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.121792 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.121822 
5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.121844 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:05Z","lastTransitionTime":"2025-10-06T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.135377 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.152195 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.177927 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/o
vnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:42Z\\\",\\\"message\\\":\\\"6 21:31:42.503008 6641 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1006 21:31:42.503035 6641 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1006 21:31:42.503045 6641 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1006 21:31:42.503080 6641 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1006 21:31:42.503103 6641 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1006 21:31:42.503113 6641 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1006 21:31:42.503136 6641 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1006 21:31:42.503181 6641 factory.go:656] Stopping watch factory\\\\nI1006 21:31:42.503201 6641 ovnkube.go:599] Stopped ovnkube\\\\nI1006 21:31:42.503243 6641 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1006 21:31:42.503270 6641 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1006 21:31:42.503284 6641 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1006 21:31:42.503301 6641 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1006 21:31:42.503315 6641 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1006 21:31:42.503329 6641 handler.go:208] Removed *v1.Node event handler 2\\\\nI1006 21:31:42.503343 
6641 handler.go:208] Removed *v1.Node event handler 7\\\\nI1006 21:31:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\
\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.199362 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9a0d7e7-de40-4e11-933d-563f96444a66\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1a3264bc2aabb1a0da4bd54eae121a419a29be7608fbd1811766ff00c8e123b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81f643a744f6ab40dd2ab7288d53704dabfc29d25088545333b7475bcabfeb79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a852a360b8208306c0db26edde70be0c758128fca997ba3a19cf6a3ceaf31240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.213001 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.225573 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.225683 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.225708 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.225786 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.225807 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:05Z","lastTransitionTime":"2025-10-06T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.232396 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.250018 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.263058 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.280657 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0cf1ba6616443cb4de3d3036bcacb1b1672bcd98fc50e2162c40b5fdfcb7583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:32:03Z\\\",\\\"message\\\":\\\"2025-10-06T21:31:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027\\\\n2025-10-06T21:31:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027 to /host/opt/cni/bin/\\\\n2025-10-06T21:31:18Z [verbose] multus-daemon started\\\\n2025-10-06T21:31:18Z [verbose] Readiness Indicator file check\\\\n2025-10-06T21:32:03Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:32:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.302237 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.316532 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 
21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.328701 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.328733 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.328741 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.328754 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.328764 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:05Z","lastTransitionTime":"2025-10-06T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.331667 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.341839 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.431540 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.431607 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.431667 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.431695 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.431714 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:05Z","lastTransitionTime":"2025-10-06T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.484261 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:05 crc kubenswrapper[5014]: E1006 21:32:05.484467 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.500200 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9a0d7e7-de40-4e11-933d-563f96444a66\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1a3264bc2aabb1a0da4bd54eae121a419a29be7608fbd1811766ff00c8e123b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81f643a744f6ab40dd2ab7288d53704dabfc29d25088545333b7475bcabfeb79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-de
v@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a852a360b8208306c0db26edde70be0c758128fca997ba3a19cf6a3ceaf31240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.516378 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.534837 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.534897 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.534921 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.534948 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.534969 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:05Z","lastTransitionTime":"2025-10-06T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.537177 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.562999 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:42Z\\\",\\\"message\\\":\\\"6 21:31:42.503008 6641 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1006 21:31:42.503035 6641 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1006 21:31:42.503045 6641 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1006 21:31:42.503080 6641 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1006 21:31:42.503103 6641 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1006 21:31:42.503113 6641 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1006 21:31:42.503136 6641 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1006 21:31:42.503181 6641 factory.go:656] Stopping watch factory\\\\nI1006 21:31:42.503201 6641 ovnkube.go:599] Stopped ovnkube\\\\nI1006 21:31:42.503243 6641 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1006 21:31:42.503270 6641 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1006 21:31:42.503284 6641 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1006 21:31:42.503301 6641 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1006 21:31:42.503315 6641 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1006 21:31:42.503329 6641 handler.go:208] Removed *v1.Node event handler 2\\\\nI1006 21:31:42.503343 6641 handler.go:208] Removed *v1.Node event handler 7\\\\nI1006 21:31:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.581029 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.601488 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.613949 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.626816 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.639646 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.639723 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.639747 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.639778 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.639800 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:05Z","lastTransitionTime":"2025-10-06T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.643247 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0cf1ba6616443cb4de3d3036bcacb1b1672bcd98fc50e2162c40b5fdfcb7583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:32:03Z\\\",\\\"message\\\":\\\"2025-10-06T21:31:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027\\\\n2025-10-06T21:31:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027 to /host/opt/cni/bin/\\\\n2025-10-06T21:31:18Z [verbose] multus-daemon started\\\\n2025-10-06T21:31:18Z [verbose] Readiness Indicator file check\\\\n2025-10-06T21:32:03Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:32:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.665581 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.682162 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 
21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.695340 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.713396 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.729775 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.742003 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.742038 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.742049 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.742065 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.742078 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:05Z","lastTransitionTime":"2025-10-06T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.745275 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.757436 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.779051 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.796256 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:05Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.844311 5014 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.844372 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.844392 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.844416 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.844433 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:05Z","lastTransitionTime":"2025-10-06T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.950584 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.950643 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.950654 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.950670 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:05 crc kubenswrapper[5014]: I1006 21:32:05.950683 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:05Z","lastTransitionTime":"2025-10-06T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.055823 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.055846 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.055854 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.055864 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.055873 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:06Z","lastTransitionTime":"2025-10-06T21:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.157920 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.157968 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.157979 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.157996 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.158010 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:06Z","lastTransitionTime":"2025-10-06T21:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.261104 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.261134 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.261146 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.261159 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.261171 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:06Z","lastTransitionTime":"2025-10-06T21:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.363475 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.363522 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.363536 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.363552 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.363563 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:06Z","lastTransitionTime":"2025-10-06T21:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
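The condition={...} payload on the setters.go:603 lines is plain JSON, so it can be pulled out and decoded directly when post-processing a capture like this. A minimal sketch, assuming the payload is exactly as logged above; the struct is ad hoc for illustration, not the upstream v1.NodeCondition type:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"time"
)

// nodeCondition mirrors the fields of the condition={...} payload in the
// setters.go:603 lines; an ad-hoc struct for this sketch only.
type nodeCondition struct {
	Type               string    `json:"type"`
	Status             string    `json:"status"`
	LastHeartbeatTime  time.Time `json:"lastHeartbeatTime"`
	LastTransitionTime time.Time `json:"lastTransitionTime"`
	Reason             string    `json:"reason"`
	Message            string    `json:"message"`
}

func main() {
	// Payload copied from the 21:32:05.844433 entry above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:05Z","lastTransitionTime":"2025-10-06T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`
	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s=%s reason=%s since=%s\n", c.Type, c.Status, c.Reason, c.LastTransitionTime.Format(time.RFC3339))
}

Run against the entry above, this prints something like Ready=False reason=KubeletNotReady since=2025-10-06T21:32:05Z, which makes the repeated heartbeat batches easy to deduplicate by (Type, Status, Reason).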
Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.486837 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 06 21:32:06 crc kubenswrapper[5014]: E1006 21:32:06.486955 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.486845 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 06 21:32:06 crc kubenswrapper[5014]: I1006 21:32:06.486841 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 06 21:32:06 crc kubenswrapper[5014]: E1006 21:32:06.487105 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 06 21:32:06 crc kubenswrapper[5014]: E1006 21:32:06.487138 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 06 21:32:07 crc kubenswrapper[5014]: I1006 21:32:07.483550 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6"
Oct 06 21:32:07 crc kubenswrapper[5014]: E1006 21:32:07.483697 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621"
Oct 06 21:32:08 crc kubenswrapper[5014]: I1006 21:32:08.483727 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 06 21:32:08 crc kubenswrapper[5014]: I1006 21:32:08.483745 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 06 21:32:08 crc kubenswrapper[5014]: I1006 21:32:08.483676 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 06 21:32:08 crc kubenswrapper[5014]: E1006 21:32:08.483915 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 06 21:32:08 crc kubenswrapper[5014]: E1006 21:32:08.484316 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 06 21:32:08 crc kubenswrapper[5014]: E1006 21:32:08.484489 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
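The NetworkReady=false loop, in turn, comes from the runtime finding no CNI configuration: the message names /etc/kubernetes/cni/net.d/ explicitly, and sandbox creation for the four pods above stays blocked until a network plugin writes a config file there. A small sketch of the check one can run on the node, assuming the conventional CNI config extensions (.conf, .conflist, .json):

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	// Directory named in the kubelet error message above.
	dir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		log.Fatalf("reading %s: %v", dir, err)
	}
	found := false
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			// Extensions CNI config loaders conventionally accept.
			found = true
			fmt.Println("CNI config present:", filepath.Join(dir, e.Name()))
		}
	}
	if !found {
		fmt.Println("no CNI configuration files found; the network plugin has not written its config yet")
	}
}

An empty directory here is consistent with the log: the network operator (OVN-Kubernetes in this deployment) has not come up, so the node stays NotReady and the pod sandboxes keep being retried.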
Oct 06 21:32:09 crc kubenswrapper[5014]: I1006 21:32:09.484493 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6"
Oct 06 21:32:09 crc kubenswrapper[5014]: E1006 21:32:09.484704 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621"
Oct 06 21:32:09 crc kubenswrapper[5014]: I1006 21:32:09.966543 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:32:09 crc kubenswrapper[5014]: I1006 21:32:09.966647 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:32:09 crc kubenswrapper[5014]: I1006 21:32:09.966666 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:32:09 crc kubenswrapper[5014]: I1006 21:32:09.966690 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:32:09 crc kubenswrapper[5014]: I1006 21:32:09.966709 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:09Z","lastTransitionTime":"2025-10-06T21:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.070076 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.070134 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.070150 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.070173 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.070190 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:10Z","lastTransitionTime":"2025-10-06T21:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.172353 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.172418 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.172440 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.172469 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.172492 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:10Z","lastTransitionTime":"2025-10-06T21:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.275589 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.275710 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.275731 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.275757 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.275773 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:10Z","lastTransitionTime":"2025-10-06T21:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.379558 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.379751 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.379823 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.379870 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.380082 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:10Z","lastTransitionTime":"2025-10-06T21:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.483419 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.483468 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.483465 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.483603 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.483485 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.483700 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.483757 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:10 crc kubenswrapper[5014]: E1006 21:32:10.483650 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.483789 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:10Z","lastTransitionTime":"2025-10-06T21:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:10 crc kubenswrapper[5014]: E1006 21:32:10.483873 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:10 crc kubenswrapper[5014]: E1006 21:32:10.484011 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.484994 5014 scope.go:117] "RemoveContainer" containerID="a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.586933 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.587422 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.587447 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.587474 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.587492 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:10Z","lastTransitionTime":"2025-10-06T21:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.691197 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.691253 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.691271 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.691295 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.691312 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:10Z","lastTransitionTime":"2025-10-06T21:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.793483 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.793522 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.793530 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.793545 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.793555 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:10Z","lastTransitionTime":"2025-10-06T21:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.896715 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.896769 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.896785 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.896808 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.896825 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:10Z","lastTransitionTime":"2025-10-06T21:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.999518 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.999558 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.999570 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:10 crc kubenswrapper[5014]: I1006 21:32:10.999588 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:10.999604 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:10Z","lastTransitionTime":"2025-10-06T21:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.039730 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/2.log" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.042302 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerStarted","Data":"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828"} Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.042792 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.057372 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\
\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"r
ecursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.072593 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.087419 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containe
rID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.102577 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.102671 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.102690 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.102716 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.102738 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:11Z","lastTransitionTime":"2025-10-06T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.103859 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.121207 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.138111 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.158172 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0cf1ba6616443cb4de3d3036bcacb1b1672bcd98fc50e2162c40b5fdfcb7583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:32:03Z\\\",\\\"message\\\":\\\"2025-10-06T21:31:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027\\\\n2025-10-06T21:31:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027 to /host/opt/cni/bin/\\\\n2025-10-06T21:31:18Z [verbose] multus-daemon started\\\\n2025-10-06T21:31:18Z [verbose] Readiness Indicator file check\\\\n2025-10-06T21:32:03Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:32:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.175131 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.204988 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.205796 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.205850 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.205868 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.205894 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.205913 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:11Z","lastTransitionTime":"2025-10-06T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.229660 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.248060 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.265386 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.287947 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.308859 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.308907 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.308918 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.308935 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.308946 5014 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:11Z","lastTransitionTime":"2025-10-06T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.309649 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.332143 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9a0d7e7-de40-4e11-933d-563f96444a66\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1a3264bc2aabb1a0da4bd54eae121a419a29be7608fbd1811766ff00c8e123b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81f643a744f6ab40dd2ab7288d53704dabfc29d25088545333b7475bcabfeb79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a852a360b8208306c0db26edde70be0c758128fca997ba3a19cf6a3ceaf31240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001
edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.350726 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.372478 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.402865 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f676eb2eae036fb0e05588b884aad00f4cee99e2
7bb39dec4dabd694f05d7828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:42Z\\\",\\\"message\\\":\\\"6 21:31:42.503008 6641 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1006 21:31:42.503035 6641 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1006 21:31:42.503045 6641 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1006 21:31:42.503080 6641 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1006 21:31:42.503103 6641 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1006 21:31:42.503113 6641 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1006 21:31:42.503136 6641 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1006 21:31:42.503181 6641 factory.go:656] Stopping watch factory\\\\nI1006 21:31:42.503201 6641 ovnkube.go:599] Stopped ovnkube\\\\nI1006 21:31:42.503243 6641 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1006 21:31:42.503270 6641 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1006 21:31:42.503284 6641 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1006 21:31:42.503301 6641 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1006 21:31:42.503315 6641 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1006 21:31:42.503329 6641 handler.go:208] Removed *v1.Node event handler 2\\\\nI1006 21:31:42.503343 6641 handler.go:208] Removed *v1.Node event handler 7\\\\nI1006 
21:31:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:32:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:11Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.411614 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.411686 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.411701 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.411724 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.411739 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:11Z","lastTransitionTime":"2025-10-06T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.484234 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:11 crc kubenswrapper[5014]: E1006 21:32:11.484555 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.543140 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.543194 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.543225 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.543242 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.543253 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:11Z","lastTransitionTime":"2025-10-06T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.653448 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.653538 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.653555 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.653586 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.653603 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:11Z","lastTransitionTime":"2025-10-06T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.757408 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.757484 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.757502 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.757528 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.757546 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:11Z","lastTransitionTime":"2025-10-06T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.861357 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.861455 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.861485 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.861515 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.861537 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:11Z","lastTransitionTime":"2025-10-06T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.965055 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.965117 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.965135 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.965352 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:11 crc kubenswrapper[5014]: I1006 21:32:11.965371 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:11Z","lastTransitionTime":"2025-10-06T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.049191 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/3.log" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.050148 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/2.log" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.054284 5014 generic.go:334] "Generic (PLEG): container finished" podID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerID="f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828" exitCode=1 Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.054349 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerDied","Data":"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828"} Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.054410 5014 scope.go:117] "RemoveContainer" containerID="a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.055690 5014 scope.go:117] "RemoveContainer" containerID="f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828" Oct 06 21:32:12 crc kubenswrapper[5014]: E1006 21:32:12.056018 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.067797 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.068012 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.068174 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.068316 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.068487 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:12Z","lastTransitionTime":"2025-10-06T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.076546 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9a0d7e7-de40-4e11-933d-563f96444a66\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1a3264bc2aabb1a0da4bd54eae121a419a29be7608fbd1811766ff00c8e123b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81f643a744f6ab40dd2ab7288d53704dabfc29d25088545333b7475bcabfeb79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a852a360b8208306c0db26edde70be0c758128fca997ba3a19cf6a3ceaf31240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.097539 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.117841 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.150288 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f676eb2eae036fb0e05588b884aad00f4cee99e2
7bb39dec4dabd694f05d7828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a4792144bb7e5504eb781ea7f14f97179213e7b979bfd274c73161d70e9f6f46\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:31:42Z\\\",\\\"message\\\":\\\"6 21:31:42.503008 6641 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1006 21:31:42.503035 6641 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1006 21:31:42.503045 6641 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1006 21:31:42.503080 6641 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1006 21:31:42.503103 6641 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1006 21:31:42.503113 6641 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1006 21:31:42.503136 6641 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1006 21:31:42.503181 6641 factory.go:656] Stopping watch factory\\\\nI1006 21:31:42.503201 6641 ovnkube.go:599] Stopped ovnkube\\\\nI1006 21:31:42.503243 6641 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1006 21:31:42.503270 6641 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1006 21:31:42.503284 6641 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1006 21:31:42.503301 6641 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1006 21:31:42.503315 6641 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1006 21:31:42.503329 6641 handler.go:208] Removed *v1.Node event handler 2\\\\nI1006 21:31:42.503343 6641 handler.go:208] Removed *v1.Node event handler 7\\\\nI1006 21:31:4\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:41Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:32:11Z\\\",\\\"message\\\":\\\"t:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.109\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1006 21:32:11.482957 6991 services_controller.go:454] Service openshift-ingress/router-internal-default for network=default has 3 cluster-wide, 0 per-node configs, 0 template configs, making 1 
(cluster) 0 (per node) and 0 (template) load balancers\\\\nI1006 21:32:11.482966 6991 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1006 21:32:11.483091 6991 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:32:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.170416 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0cf1ba6616443cb4de3d3036bcacb1b1672bcd98fc50e2162c40b5fdfcb7583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:32:03Z\\\",\\\"message\\\":\\\"2025-10-06T21:31:17+00:00 
[cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027\\\\n2025-10-06T21:31:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027 to /host/opt/cni/bin/\\\\n2025-10-06T21:31:18Z [verbose] multus-daemon started\\\\n2025-10-06T21:31:18Z [verbose] Readiness Indicator file check\\\\n2025-10-06T21:32:03Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:32:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.172889 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.172943 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:12 crc 
kubenswrapper[5014]: I1006 21:32:12.172963 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.172989 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.173010 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:12Z","lastTransitionTime":"2025-10-06T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.198341 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5
a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api
-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.215910 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-ov
errides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.223067 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.223124 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.223140 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.223165 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.223182 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:12Z","lastTransitionTime":"2025-10-06T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.235824 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: E1006 21:32:12.245324 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.250574 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.250827 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.250871 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.250904 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.250930 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:12Z","lastTransitionTime":"2025-10-06T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.258685 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: E1006 21:32:12.273078 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.278939 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.278978 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.278996 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.279019 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.279036 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:12Z","lastTransitionTime":"2025-10-06T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.281000 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.295893 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: E1006 21:32:12.298190 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d
96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.302732 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.302884 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.302987 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.303092 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.303193 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:12Z","lastTransitionTime":"2025-10-06T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.310599 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: E1006 21:32:12.321165 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.325571 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.325744 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.325845 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.325941 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.326029 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:12Z","lastTransitionTime":"2025-10-06T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.341673 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"ima
geID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: E1006 21:32:12.344052 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: E1006 21:32:12.344611 5014 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.347672 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.347737 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.347759 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.347786 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.347815 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:12Z","lastTransitionTime":"2025-10-06T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.362070 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.376006 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.388479 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.407952 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.425095 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:12Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.450772 5014 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.450863 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.450883 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.450909 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.450960 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:12Z","lastTransitionTime":"2025-10-06T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.483486 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.483564 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.483518 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:12 crc kubenswrapper[5014]: E1006 21:32:12.483742 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:12 crc kubenswrapper[5014]: E1006 21:32:12.483966 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:12 crc kubenswrapper[5014]: E1006 21:32:12.484300 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.554524 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.554593 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.554642 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.554670 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.554691 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:12Z","lastTransitionTime":"2025-10-06T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.657590 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.657654 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.657674 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.657695 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.657713 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:12Z","lastTransitionTime":"2025-10-06T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.760980 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.761067 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.761086 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.761119 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.761141 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:12Z","lastTransitionTime":"2025-10-06T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.865446 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.865539 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.865563 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.865595 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.865648 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:12Z","lastTransitionTime":"2025-10-06T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.969162 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.969239 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.969257 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.969288 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:12 crc kubenswrapper[5014]: I1006 21:32:12.969309 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:12Z","lastTransitionTime":"2025-10-06T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.062833 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/3.log" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.068390 5014 scope.go:117] "RemoveContainer" containerID="f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828" Oct 06 21:32:13 crc kubenswrapper[5014]: E1006 21:32:13.068686 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.073032 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.073102 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.073128 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.073156 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.073181 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:13Z","lastTransitionTime":"2025-10-06T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.089747 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.122776 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:32:11Z\\\",\\\"message\\\":\\\"t:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.109\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1006 21:32:11.482957 6991 services_controller.go:454] Service openshift-ingress/router-internal-default for network=default has 3 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1006 21:32:11.482966 6991 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1006 21:32:11.483091 6991 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:32:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.143250 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9a0d7e7-de40-4e11-933d-563f96444a66\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1a3264bc2aabb1a0da4bd54eae121a419a29be7608fbd1811766ff00c8e123b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81f643a744f6ab40dd2ab7288d53704dabfc29d25088545333b7475bcabfeb79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a852a360b8208306c0db26edde70be0c758128fca997ba3a19cf6a3ceaf31240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.164555 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.176534 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.176655 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.176685 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.176726 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.176753 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:13Z","lastTransitionTime":"2025-10-06T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.188110 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.208124 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.226013 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.266683 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0cf1ba6616443cb4de3d3036bcacb1b1672bcd98fc50e2162c40b5fdfcb7583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:32:03Z\\\",\\\"message\\\":\\\"2025-10-06T21:31:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027\\\\n2025-10-06T21:31:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027 to /host/opt/cni/bin/\\\\n2025-10-06T21:31:18Z [verbose] multus-daemon started\\\\n2025-10-06T21:31:18Z [verbose] Readiness Indicator file check\\\\n2025-10-06T21:32:03Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:32:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.279744 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.280020 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.280139 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.280232 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.280327 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:13Z","lastTransitionTime":"2025-10-06T21:32:13Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.304822 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.320535 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.337503 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.354705 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.370691 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.383136 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.383285 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.383349 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.383413 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.383472 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:13Z","lastTransitionTime":"2025-10-06T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.386315 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.417653 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca3
09995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.436494 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.457067 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\
\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] 
issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.480118 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:13Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.484661 5014 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:13 crc kubenswrapper[5014]: E1006 21:32:13.484891 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.486834 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.486893 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.486915 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.486942 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.486965 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:13Z","lastTransitionTime":"2025-10-06T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.590328 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.590397 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.590415 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.590438 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.590455 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:13Z","lastTransitionTime":"2025-10-06T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.692965 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.693030 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.693054 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.693085 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.693107 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:13Z","lastTransitionTime":"2025-10-06T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.796405 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.796462 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.796480 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.796505 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.796521 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:13Z","lastTransitionTime":"2025-10-06T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.899230 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.899315 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.899340 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.899370 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:13 crc kubenswrapper[5014]: I1006 21:32:13.899390 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:13Z","lastTransitionTime":"2025-10-06T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.002248 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.002312 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.002328 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.002355 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.002374 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:14Z","lastTransitionTime":"2025-10-06T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.105082 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.105140 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.105159 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.105185 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.105203 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:14Z","lastTransitionTime":"2025-10-06T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.208168 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.208248 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.208272 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.208297 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.208314 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:14Z","lastTransitionTime":"2025-10-06T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.311792 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.311847 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.311869 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.311898 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.311921 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:14Z","lastTransitionTime":"2025-10-06T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.415459 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.415530 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.415548 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.415574 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.415597 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:14Z","lastTransitionTime":"2025-10-06T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.484013 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.484068 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.484039 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:14 crc kubenswrapper[5014]: E1006 21:32:14.484212 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:14 crc kubenswrapper[5014]: E1006 21:32:14.484409 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:14 crc kubenswrapper[5014]: E1006 21:32:14.484526 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.518585 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.518661 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.518684 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.518708 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.518726 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:14Z","lastTransitionTime":"2025-10-06T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.621599 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.621698 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.621717 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.621740 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.621758 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:14Z","lastTransitionTime":"2025-10-06T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.724367 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.724847 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.725095 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.725351 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.725564 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:14Z","lastTransitionTime":"2025-10-06T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.829362 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.829425 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.829449 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.829476 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.829497 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:14Z","lastTransitionTime":"2025-10-06T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.932447 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.932506 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.932523 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.932547 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:32:14 crc kubenswrapper[5014]: I1006 21:32:14.932566 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:14Z","lastTransitionTime":"2025-10-06T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.036229 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.036295 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.036319 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.036353 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.036378 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:15Z","lastTransitionTime":"2025-10-06T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.139457 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.139517 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.139536 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.139566 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.139589 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:15Z","lastTransitionTime":"2025-10-06T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.242166 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.242222 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.242239 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.242262 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.242282 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:15Z","lastTransitionTime":"2025-10-06T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.345443 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.345501 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.345518 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.345542 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.345559 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:15Z","lastTransitionTime":"2025-10-06T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.448383 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.448423 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.448435 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.448451 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.448464 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:15Z","lastTransitionTime":"2025-10-06T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.483957 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6"
Oct 06 21:32:15 crc kubenswrapper[5014]: E1006 21:32:15.484238 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.502176 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.519429 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.551592 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.551709 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.551769 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.551795 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.551852 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:15Z","lastTransitionTime":"2025-10-06T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.557048 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.577776 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.602775 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\
\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] 
issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.622683 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.644500 5014 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.657575 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.657614 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:15 
crc kubenswrapper[5014]: I1006 21:32:15.657664 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.657682 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.657695 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:15Z","lastTransitionTime":"2025-10-06T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.678019 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f676eb2eae036fb0e05588b884aad00f4cee99e2
7bb39dec4dabd694f05d7828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:32:11Z\\\",\\\"message\\\":\\\"t:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.109\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1006 21:32:11.482957 6991 services_controller.go:454] Service openshift-ingress/router-internal-default for network=default has 3 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1006 21:32:11.482966 6991 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1006 21:32:11.483091 6991 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:32:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.696181 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9a0d7e7-de40-4e11-933d-563f96444a66\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1a3264bc2aabb1a0da4bd54eae121a419a29be7608fbd1811766ff00c8e123b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81f643a744f6ab40dd2ab7288d53704dabfc29d25088545333b7475bcabfeb79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a852a360b8208306c0db26edde70be0c758128fca997ba3a19cf6a3ceaf31240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.710582 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.732204 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.750168 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.761338 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.761445 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.761506 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.761538 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.761560 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:15Z","lastTransitionTime":"2025-10-06T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.766020 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.784426 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0cf1ba6616443cb4de3d3036bcacb1b1672bcd98fc50e2162c40b5fdfcb7583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:32:03Z\\\",\\\"message\\\":\\\"2025-10-06T21:31:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027\\\\n2025-10-06T21:31:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027 to /host/opt/cni/bin/\\\\n2025-10-06T21:31:18Z [verbose] multus-daemon started\\\\n2025-10-06T21:31:18Z [verbose] Readiness Indicator file check\\\\n2025-10-06T21:32:03Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:32:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.806407 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.824225 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 
21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.846047 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.861712 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:15Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.864037 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.864093 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.864113 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.864137 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.864154 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:15Z","lastTransitionTime":"2025-10-06T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.967541 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.967614 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.967670 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.967700 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:15 crc kubenswrapper[5014]: I1006 21:32:15.967724 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:15Z","lastTransitionTime":"2025-10-06T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.071152 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.071212 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.071231 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.071258 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.071277 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:16Z","lastTransitionTime":"2025-10-06T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.174696 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.174762 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.174779 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.174803 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.174819 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:16Z","lastTransitionTime":"2025-10-06T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.277963 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.278023 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.278041 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.278067 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.278087 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:16Z","lastTransitionTime":"2025-10-06T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.381713 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.381780 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.381799 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.381827 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.381845 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:16Z","lastTransitionTime":"2025-10-06T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.484181 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.484287 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.484364 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:16 crc kubenswrapper[5014]: E1006 21:32:16.484500 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.484759 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.484790 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.484807 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.484828 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.484844 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:16Z","lastTransitionTime":"2025-10-06T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:16 crc kubenswrapper[5014]: E1006 21:32:16.484976 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:16 crc kubenswrapper[5014]: E1006 21:32:16.485102 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.503353 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.587415 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.587468 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.587486 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.587508 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.587525 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:16Z","lastTransitionTime":"2025-10-06T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.690790 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.690843 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.690862 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.690884 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.690902 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:16Z","lastTransitionTime":"2025-10-06T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.793215 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.793264 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.793280 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.793302 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.793321 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:16Z","lastTransitionTime":"2025-10-06T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.896164 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.896222 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.896241 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.896269 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.896288 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:16Z","lastTransitionTime":"2025-10-06T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.998819 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.998901 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.998924 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.998958 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:16 crc kubenswrapper[5014]: I1006 21:32:16.998982 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:16Z","lastTransitionTime":"2025-10-06T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.101907 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.101954 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.101972 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.101995 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.102012 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:17Z","lastTransitionTime":"2025-10-06T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.205057 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.205116 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.205136 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.205161 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.205178 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:17Z","lastTransitionTime":"2025-10-06T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.307942 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.307991 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.308010 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.308033 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.308050 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:17Z","lastTransitionTime":"2025-10-06T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.410071 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.410101 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.410109 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.410127 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.410135 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:17Z","lastTransitionTime":"2025-10-06T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.484184 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:17 crc kubenswrapper[5014]: E1006 21:32:17.484384 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.512773 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.512834 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.512854 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.512875 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.512892 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:17Z","lastTransitionTime":"2025-10-06T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.616349 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.616399 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.616414 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.616432 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.616447 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:17Z","lastTransitionTime":"2025-10-06T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.720085 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.720144 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.720163 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.720190 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.720209 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:17Z","lastTransitionTime":"2025-10-06T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.823606 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.823703 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.823726 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.823757 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.823781 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:17Z","lastTransitionTime":"2025-10-06T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.927977 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.928048 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.928066 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.928094 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:17 crc kubenswrapper[5014]: I1006 21:32:17.928114 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:17Z","lastTransitionTime":"2025-10-06T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.031822 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.031888 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.031905 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.031930 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.031950 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:18Z","lastTransitionTime":"2025-10-06T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.135393 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.135475 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.135500 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.135528 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.135546 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:18Z","lastTransitionTime":"2025-10-06T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.238541 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.238608 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.238656 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.238681 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.238698 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:18Z","lastTransitionTime":"2025-10-06T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.341716 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.341787 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.341812 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.341844 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.341868 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:18Z","lastTransitionTime":"2025-10-06T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.445051 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.445134 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.445155 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.445181 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.445232 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:18Z","lastTransitionTime":"2025-10-06T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.447573 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:32:18 crc kubenswrapper[5014]: E1006 21:32:18.447753 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.447720268 +0000 UTC m=+147.740757042 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.484337 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.484349 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.484394 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:18 crc kubenswrapper[5014]: E1006 21:32:18.484679 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:18 crc kubenswrapper[5014]: E1006 21:32:18.484856 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:18 crc kubenswrapper[5014]: E1006 21:32:18.485093 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.548161 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.548233 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.548259 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.548293 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.548317 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:18Z","lastTransitionTime":"2025-10-06T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.548853 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.548961 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.549005 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.549043 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:18 crc kubenswrapper[5014]: E1006 21:32:18.549070 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 06 21:32:18 crc kubenswrapper[5014]: E1006 21:32:18.549114 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 06 21:32:18 crc 
kubenswrapper[5014]: E1006 21:32:18.549213 5014 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:32:18 crc kubenswrapper[5014]: E1006 21:32:18.549213 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 06 21:32:18 crc kubenswrapper[5014]: E1006 21:32:18.549255 5014 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 06 21:32:18 crc kubenswrapper[5014]: E1006 21:32:18.549277 5014 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:32:18 crc kubenswrapper[5014]: E1006 21:32:18.549294 5014 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 06 21:32:18 crc kubenswrapper[5014]: E1006 21:32:18.549352 5014 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 06 21:32:18 crc kubenswrapper[5014]: E1006 21:32:18.549320 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.549293461 +0000 UTC m=+147.842330225 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:32:18 crc kubenswrapper[5014]: E1006 21:32:18.550052 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.550031937 +0000 UTC m=+147.843068711 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 06 21:32:18 crc kubenswrapper[5014]: E1006 21:32:18.550113 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-10-06 21:33:22.550096259 +0000 UTC m=+147.843133023 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 06 21:32:18 crc kubenswrapper[5014]: E1006 21:32:18.550156 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.550144061 +0000 UTC m=+147.843180835 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.652204 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.652303 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.652327 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.652361 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.652385 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:18Z","lastTransitionTime":"2025-10-06T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.755819 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.755859 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.755867 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.755880 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.755889 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:18Z","lastTransitionTime":"2025-10-06T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.857969 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.858031 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.858051 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.858077 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.858095 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:18Z","lastTransitionTime":"2025-10-06T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.961725 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.961792 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.961812 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.961836 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:18 crc kubenswrapper[5014]: I1006 21:32:18.961860 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:18Z","lastTransitionTime":"2025-10-06T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.064683 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.064750 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.064767 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.064798 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.064815 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:19Z","lastTransitionTime":"2025-10-06T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.167102 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.167190 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.167207 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.167231 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.167249 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:19Z","lastTransitionTime":"2025-10-06T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.269972 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.270025 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.270048 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.270074 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.270097 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:19Z","lastTransitionTime":"2025-10-06T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.372818 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.372897 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.372916 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.372942 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.372962 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:19Z","lastTransitionTime":"2025-10-06T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.475824 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.475891 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.475907 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.475932 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.475949 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:19Z","lastTransitionTime":"2025-10-06T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.484307 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:19 crc kubenswrapper[5014]: E1006 21:32:19.484501 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.580561 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.580720 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.580743 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.580771 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.580799 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:19Z","lastTransitionTime":"2025-10-06T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.685082 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.685501 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.685520 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.685543 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.685563 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:19Z","lastTransitionTime":"2025-10-06T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.788446 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.788492 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.788508 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.788561 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.788578 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:19Z","lastTransitionTime":"2025-10-06T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.892080 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.892139 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.892158 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.892181 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.892198 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:19Z","lastTransitionTime":"2025-10-06T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.995467 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.995524 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.995543 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.995572 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:19 crc kubenswrapper[5014]: I1006 21:32:19.995595 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:19Z","lastTransitionTime":"2025-10-06T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.098739 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.098802 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.098821 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.098846 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.098865 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:20Z","lastTransitionTime":"2025-10-06T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.201766 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.202735 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.202775 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.202801 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.202819 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:20Z","lastTransitionTime":"2025-10-06T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.306337 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.306404 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.306427 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.306459 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.306483 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:20Z","lastTransitionTime":"2025-10-06T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.409314 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.409403 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.409440 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.409470 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.409491 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:20Z","lastTransitionTime":"2025-10-06T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.483584 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.483710 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.483589 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:20 crc kubenswrapper[5014]: E1006 21:32:20.483802 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:20 crc kubenswrapper[5014]: E1006 21:32:20.483907 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:20 crc kubenswrapper[5014]: E1006 21:32:20.484067 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.513276 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.513329 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.513346 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.513368 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.513387 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:20Z","lastTransitionTime":"2025-10-06T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.617147 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.617278 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.617306 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.617331 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.617349 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:20Z","lastTransitionTime":"2025-10-06T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.720106 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.720165 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.720182 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.720206 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.720225 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:20Z","lastTransitionTime":"2025-10-06T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.822566 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.822689 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.822708 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.822734 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.822751 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:20Z","lastTransitionTime":"2025-10-06T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.925732 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.925808 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.925832 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.926727 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:20 crc kubenswrapper[5014]: I1006 21:32:20.926929 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:20Z","lastTransitionTime":"2025-10-06T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.029676 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.030045 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.030243 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.030451 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.030702 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:21Z","lastTransitionTime":"2025-10-06T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.133942 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.134301 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.134478 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.134697 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.134875 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:21Z","lastTransitionTime":"2025-10-06T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.238048 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.238174 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.238193 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.238218 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.238235 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:21Z","lastTransitionTime":"2025-10-06T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.340955 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.341019 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.341037 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.341060 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.341079 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:21Z","lastTransitionTime":"2025-10-06T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.444164 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.444225 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.444250 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.444278 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.444301 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:21Z","lastTransitionTime":"2025-10-06T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.484175 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:21 crc kubenswrapper[5014]: E1006 21:32:21.484407 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.547587 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.547762 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.547788 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.547820 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.547844 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:21Z","lastTransitionTime":"2025-10-06T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.651661 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.651747 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.651772 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.651802 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.651824 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:21Z","lastTransitionTime":"2025-10-06T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.755456 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.755511 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.755564 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.755588 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.755608 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:21Z","lastTransitionTime":"2025-10-06T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.858178 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.858268 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.858292 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.858326 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.858352 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:21Z","lastTransitionTime":"2025-10-06T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.961445 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.961516 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.961541 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.961586 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:21 crc kubenswrapper[5014]: I1006 21:32:21.961613 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:21Z","lastTransitionTime":"2025-10-06T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.064240 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.064292 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.064309 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.064327 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.064339 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:22Z","lastTransitionTime":"2025-10-06T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.167158 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.167218 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.167237 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.167262 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.167281 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:22Z","lastTransitionTime":"2025-10-06T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.271048 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.271114 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.271131 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.271155 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.271173 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:22Z","lastTransitionTime":"2025-10-06T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.374009 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.374071 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.374088 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.374111 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.374139 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:22Z","lastTransitionTime":"2025-10-06T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.477765 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.477852 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.477875 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.477905 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.477928 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:22Z","lastTransitionTime":"2025-10-06T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.483412 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.483446 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.483499 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:22 crc kubenswrapper[5014]: E1006 21:32:22.483614 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:22 crc kubenswrapper[5014]: E1006 21:32:22.483791 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:22 crc kubenswrapper[5014]: E1006 21:32:22.483952 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.580854 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.580920 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.580940 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.580964 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.580982 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:22Z","lastTransitionTime":"2025-10-06T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.683930 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.683992 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.684015 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.684044 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.684064 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:22Z","lastTransitionTime":"2025-10-06T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.717494 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.717550 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.717568 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.717592 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.717612 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:22Z","lastTransitionTime":"2025-10-06T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:22 crc kubenswrapper[5014]: E1006 21:32:22.738803 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.744089 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.744147 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.744172 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.744202 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.744229 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:22Z","lastTransitionTime":"2025-10-06T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:22 crc kubenswrapper[5014]: E1006 21:32:22.764535 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.769161 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.769212 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.769227 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.769251 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.769273 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:22Z","lastTransitionTime":"2025-10-06T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:22 crc kubenswrapper[5014]: E1006 21:32:22.789121 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.793977 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.794031 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.794050 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.794073 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.794091 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:22Z","lastTransitionTime":"2025-10-06T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:22 crc kubenswrapper[5014]: E1006 21:32:22.814867 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.820334 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.820383 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.820400 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.820425 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.820442 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:22Z","lastTransitionTime":"2025-10-06T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:22 crc kubenswrapper[5014]: E1006 21:32:22.841283 5014 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d3fb3b65-6228-438f-aaa3-137984163b6d\\\",\\\"systemUUID\\\":\\\"d96e7155-4816-42b4-95d2-01738aa57d05\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:22Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:22 crc kubenswrapper[5014]: E1006 21:32:22.841510 5014 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.843860 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.843920 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.843943 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.843970 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.843992 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:22Z","lastTransitionTime":"2025-10-06T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.947183 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.947254 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.947276 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.947301 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:22 crc kubenswrapper[5014]: I1006 21:32:22.947319 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:22Z","lastTransitionTime":"2025-10-06T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.055901 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.055966 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.055990 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.056020 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.056044 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:23Z","lastTransitionTime":"2025-10-06T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.159150 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.159223 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.159240 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.159265 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.159282 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:23Z","lastTransitionTime":"2025-10-06T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.263260 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.263332 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.263349 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.263373 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.263392 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:23Z","lastTransitionTime":"2025-10-06T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.366930 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.367010 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.367029 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.367053 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.367070 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:23Z","lastTransitionTime":"2025-10-06T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.470264 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.470330 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.470355 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.470386 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.470411 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:23Z","lastTransitionTime":"2025-10-06T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.484014 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:23 crc kubenswrapper[5014]: E1006 21:32:23.484258 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.574194 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.574262 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.574281 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.574306 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.574326 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:23Z","lastTransitionTime":"2025-10-06T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.677211 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.677265 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.677283 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.677307 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.677327 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:23Z","lastTransitionTime":"2025-10-06T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.779550 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.779663 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.779684 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.779709 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.779727 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:23Z","lastTransitionTime":"2025-10-06T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.882834 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.882913 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.882934 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.882957 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.882975 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:23Z","lastTransitionTime":"2025-10-06T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.989585 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.989684 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.989709 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.989767 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:23 crc kubenswrapper[5014]: I1006 21:32:23.989799 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:23Z","lastTransitionTime":"2025-10-06T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.093399 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.093471 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.093490 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.093514 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.093532 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:24Z","lastTransitionTime":"2025-10-06T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.197755 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.197845 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.197870 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.197898 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.197920 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:24Z","lastTransitionTime":"2025-10-06T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.300868 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.300953 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.300969 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.300993 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.301011 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:24Z","lastTransitionTime":"2025-10-06T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.403580 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.403698 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.403720 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.403747 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.403768 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:24Z","lastTransitionTime":"2025-10-06T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.483749 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.483749 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:24 crc kubenswrapper[5014]: E1006 21:32:24.483928 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.483765 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:24 crc kubenswrapper[5014]: E1006 21:32:24.484167 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:24 crc kubenswrapper[5014]: E1006 21:32:24.484234 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.507246 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.507334 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.507357 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.507387 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.507410 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:24Z","lastTransitionTime":"2025-10-06T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.611186 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.611239 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.611255 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.611277 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.611297 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:24Z","lastTransitionTime":"2025-10-06T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.714607 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.714710 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.714737 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.714767 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.714789 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:24Z","lastTransitionTime":"2025-10-06T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.817397 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.817475 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.817498 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.817523 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.817541 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:24Z","lastTransitionTime":"2025-10-06T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.920829 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.920903 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.920929 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.920958 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:24 crc kubenswrapper[5014]: I1006 21:32:24.920980 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:24Z","lastTransitionTime":"2025-10-06T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.024933 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.025008 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.025026 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.025052 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.025069 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:25Z","lastTransitionTime":"2025-10-06T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.127878 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.127991 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.128014 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.128041 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.128058 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:25Z","lastTransitionTime":"2025-10-06T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.231548 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.231601 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.231648 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.231682 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.231703 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:25Z","lastTransitionTime":"2025-10-06T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.334067 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.334132 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.334149 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.334174 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.334192 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:25Z","lastTransitionTime":"2025-10-06T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.436829 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.436886 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.436904 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.436927 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.436944 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:25Z","lastTransitionTime":"2025-10-06T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.484587 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:25 crc kubenswrapper[5014]: E1006 21:32:25.484813 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.486131 5014 scope.go:117] "RemoveContainer" containerID="f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828" Oct 06 21:32:25 crc kubenswrapper[5014]: E1006 21:32:25.486368 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.519067 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06547623-0e4a-4d2d-b655-65f380595746\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b82bc9fd213761bfd30a658467309e83fdca370b55d96b231704a5f3d94a0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a3638a4c9c5992291c323aba026f1672e69496f114ca25f1634c01a8fb73fbf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mou
ntPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2001f6726e794ce7842e273d97b7ffb39e4a463a670295365df7e49e6148367d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://722036b79c1b36be1fca639f415c95ed072dca309995117b7ad8622cbcad6cf9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57a1153b62344a0b2dc657bca583be0408ca2ab8a8093c133e3407ef46a79bab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b039129873beec607747a23f41297b14eb8a40d66317d7e1b248635098ca2ed2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-d
ir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d53a7407ec098ccfa82858659d50e75aff2538c226b5b5eb49a3ad69fd949313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28d0b4c8f527e95900ba136f3b97dac4bb79c50c5c1adb03e7726b1f9b482d78\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.539500 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.540194 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.540236 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.540260 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.540288 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.540311 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:25Z","lastTransitionTime":"2025-10-06T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.558577 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1de88713c6646e5077b190e5fdc4e156c515a0238548bb0d308f04de3abf1306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.573754 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-r8bdz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"297c0e39-f2c1-4708-a2ba-cb9576086924\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea67be003ca8539ccd77d918fea2a0db7be11a79d4b65f6be5d6226974a50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d55hg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-r8bdz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.596298 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b3844dd-2dc3-429a-8a98-f3cd7db5ca85\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0c189f98ec0fe15f29de87ea8e6ef3369b345863a7a1810343ed95790e4f9e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94f1a38f146a027c0d2764c41437e0251e0a59cf8ac0e068964f065281720099\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://833f2882d4d77de84a681e045b4be2cd39513bbca8c9b8f0d58f17ed4f7dfa56\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://40133117fb46a11fc411ad68eb8fa64f93fc9f69702de803c98d17143894192f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8ac5af4c15eceaa1c98d80c302fd9ca9e13d1e6ae475a8565b0776535556875\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"ized mutatingChan\\\\\\\" len=200\\\\nI1006 21:31:14.420279 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1006 21:31:14.420434 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1006 21:31:14.430338 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1006 21:31:14.430377 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430383 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1006 21:31:14.430391 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1006 21:31:14.430401 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1006 21:31:14.430405 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1006 21:31:14.430410 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1006 21:31:14.430662 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI1006 21:31:14.433425 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\"\\\\nI1006 21:31:14.433541 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433572 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1006 21:31:14.433588 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-348314667/tls.crt::/tmp/serving-cert-348314667/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1759786258\\\\\\\\\\\\\\\" (2025-10-06 21:30:58 +0000 UTC to 2025-11-05 21:30:59 +0000 UTC (now=2025-10-06 21:31:14.433538461 +0000 UTC))\\\\\\\"\\\\nF1006 21:31:14.434404 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d4511086f0418b0e3cf64ba2b1a602b3716c9b244479e9c99eb2aa5c1a671fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d331cb089e97dd80c086481e8088605eba6453afdfd39ed592174e59c18120c7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.616754 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33478e0f-9143-4e11-96a1-04c53f0f6277\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b18d4ad866ac0ebfd8a1c3d7fb09c05c45c780ae926d4f547b2c13dc79bc30a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9qn2z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-6bths\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.635266 5014 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e9a0d7e7-de40-4e11-933d-563f96444a66\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1a3264bc2aabb1a0da4bd54eae121a419a29be7608fbd1811766ff00c8e123b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81f643a744f6ab40dd2ab7288d53704dabfc29d25088545333b7475bcabfeb79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a852a360b8208306c0db26edde70be0c758128fca997ba3a19cf6a3ceaf31240\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e836b1b3b94eec0367b7220dd5b80f2f397b8cc35c436541737e810bf76fac22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.643544 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.643606 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.643654 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.643681 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.643699 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:25Z","lastTransitionTime":"2025-10-06T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.650892 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.671119 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a91cebb613afa35ff5b160c50820990bcc81c4da534a27f9e9f34f00ab2a8b03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9411afc4cfe519f66dc3d03b0c1172fc18ad371820fe807dd72ec71c170d9b9f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.699983 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d2de4ac-a423-4f5a-904a-817553f204f6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:32:11Z\\\",\\\"message\\\":\\\"t:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.109\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1006 21:32:11.482957 6991 services_controller.go:454] Service openshift-ingress/router-internal-default for network=default has 3 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1006 21:32:11.482966 6991 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1006 21:32:11.483091 6991 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:32:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gcbj4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2wj75\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.723065 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f5c94fe2-2a02-4515-bc83-234827e59e4f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://723dca787a53b04bc967379b45cfc27f94c7f9326ea511656a07ace98b87a3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce6925135cb0f9c038867b38f60f311115cce5a6178c0d01ac8de4480887491c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f823e06a9de5f0ae72faafc9d6514bacb796e9b3d161a2d76f69cbfd6a18168\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41ed9b4c08a19862993f4d844d5f8b5563674b4720ec704cc39778a00d6bf694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-10-06T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c85cd6fdc3a56ca6f4ef8c4af220919d5b97e3711d7a52725da78d782ade0c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f781ad762503b15a1e4f810649a257309240f1f111150840b8fce47e0a9ca408\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38c014e59b90c520c58cdcba970891436d2302dbe5d5bbf3b32cafd964390f09\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p22wl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gqd7v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.744058 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f43a657-4080-406d-8736-6520c72d1a97\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f450daac07cbe574707485491303a360ea8b1395520faa4d7b5ea6729f7b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://51e3420f88bc03adf5a62e72146aabc7074c4f257ca256bd4103da8612cbc31a\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-59sjn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bj245\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.747335 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.747427 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.747452 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.747485 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.747509 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:25Z","lastTransitionTime":"2025-10-06T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.760882 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b55c256e-5f19-4d53-bd51-3b5c4422e8ee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dda4a7a92488e6ff8860810aa9163e98bffc9dc8c7ffe77e9e85d577af45202a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9497062cd310a2e43ec77ff8cfc3b406613f5e4ff90d676be93045025290e3af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9497062cd310a2e43ec77ff8cfc3b406613f5e4ff90d676be93045025290e3af\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-06T21:30:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.782494 5014 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3341010-c79a-4e46-bb44-576516a36260\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:30:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89c443c97fffabd62ab0aafde5f064eb3fecaa83b3d2a5135aa7ae386c16d0d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://80808a92b508a8a7069e9cdfdaf737d0a0a8040f03afde579083287542a1528e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f90071ecfcceee56acdd07f981b3780eec34a0825a7f8491708f09fa7be7986f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://76bf5658162beeaa41d71b901198a5c02c9955d7c9d5b654f0d25939c63e6904\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:30:55Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.803588 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://31f59188957404daf818358d9d8bf5f24f07b13ad898f02e04a69d891d7c6173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.844607 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.850700 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.850732 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.850744 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.850759 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.850773 5014 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:25Z","lastTransitionTime":"2025-10-06T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.861100 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-rmjtc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c53c82a2-7b51-49c1-88f4-fdb5df783712\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://80dc1ca01c8603b09e20e4bda7f31185e178182727662ec54ac26c3c408a839a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cn8pk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:15Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-rmjtc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.876694 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8ddbf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f1464a5-d713-4f79-8248-33c69abcdac2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:32:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0cf1ba6616443cb4de3d3036bcacb1b1672bcd98fc50e2162c40b5fdfcb7583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-06T21:32:03Z\\\",\\\"message\\\":\\\"2025-10-06T21:31:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027\\\\n2025-10-06T21:31:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1973f2e1-c7e4-4ce1-9b23-e312a8ca1027 to /host/opt/cni/bin/\\\\n2025-10-06T21:31:18Z [verbose] multus-daemon started\\\\n2025-10-06T21:31:18Z [verbose] Readiness Indicator file check\\\\n2025-10-06T21:32:03Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-06T21:31:16Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-06T21:32:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-89vs2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:16Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8ddbf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.892685 5014 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-chcf6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-06T21:31:30Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xhgks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-06T21:31:30Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-chcf6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-06T21:32:25Z is after 2025-08-24T17:21:41Z" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.953790 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.954234 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.954389 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.954557 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:25 crc kubenswrapper[5014]: I1006 21:32:25.954728 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:25Z","lastTransitionTime":"2025-10-06T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.058426 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.058500 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.058524 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.058555 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.058592 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:26Z","lastTransitionTime":"2025-10-06T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.161670 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.161739 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.161756 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.161781 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.161798 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:26Z","lastTransitionTime":"2025-10-06T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.265066 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.265141 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.265164 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.265187 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.265204 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:26Z","lastTransitionTime":"2025-10-06T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.367302 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.367367 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.367386 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.367410 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.367429 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:26Z","lastTransitionTime":"2025-10-06T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.470793 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.470852 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.470869 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.470892 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.470909 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:26Z","lastTransitionTime":"2025-10-06T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.483685 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.483930 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.484006 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:26 crc kubenswrapper[5014]: E1006 21:32:26.484121 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:26 crc kubenswrapper[5014]: E1006 21:32:26.484489 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:26 crc kubenswrapper[5014]: E1006 21:32:26.484710 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.573969 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.574040 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.574058 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.574091 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.574110 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:26Z","lastTransitionTime":"2025-10-06T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.677392 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.677454 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.677472 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.677494 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.677514 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:26Z","lastTransitionTime":"2025-10-06T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.780436 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.780472 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.780480 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.780493 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.780501 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:26Z","lastTransitionTime":"2025-10-06T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.884035 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.884113 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.884132 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.884157 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.884177 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:26Z","lastTransitionTime":"2025-10-06T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.987660 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.987729 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.987753 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.987781 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:26 crc kubenswrapper[5014]: I1006 21:32:26.987805 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:26Z","lastTransitionTime":"2025-10-06T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.091135 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.091203 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.091232 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.091259 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.091277 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:27Z","lastTransitionTime":"2025-10-06T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.193570 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.193660 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.193680 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.193702 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.193719 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:27Z","lastTransitionTime":"2025-10-06T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.296507 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.296581 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.296604 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.296668 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.296705 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:27Z","lastTransitionTime":"2025-10-06T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.399824 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.399895 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.399919 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.399952 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.399976 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:27Z","lastTransitionTime":"2025-10-06T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.483965 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:27 crc kubenswrapper[5014]: E1006 21:32:27.484190 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.502435 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.502597 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.502698 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.502724 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.502743 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:27Z","lastTransitionTime":"2025-10-06T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.606536 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.606612 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.606643 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.606665 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.606680 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:27Z","lastTransitionTime":"2025-10-06T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.710236 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.710311 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.710334 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.710365 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.710390 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:27Z","lastTransitionTime":"2025-10-06T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.814052 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.814127 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.814153 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.814186 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.814204 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:27Z","lastTransitionTime":"2025-10-06T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.918496 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.918558 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.918575 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.918598 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:27 crc kubenswrapper[5014]: I1006 21:32:27.918638 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:27Z","lastTransitionTime":"2025-10-06T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.021199 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.021240 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.021249 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.021285 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.021302 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:28Z","lastTransitionTime":"2025-10-06T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.124942 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.124988 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.125007 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.125027 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.125042 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:28Z","lastTransitionTime":"2025-10-06T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.228612 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.228729 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.228749 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.228781 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.228803 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:28Z","lastTransitionTime":"2025-10-06T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.331543 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.331591 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.331611 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.331660 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.331679 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:28Z","lastTransitionTime":"2025-10-06T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.434754 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.434807 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.434824 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.434848 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.434864 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:28Z","lastTransitionTime":"2025-10-06T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.483904 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.483967 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.484049 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:28 crc kubenswrapper[5014]: E1006 21:32:28.484201 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:28 crc kubenswrapper[5014]: E1006 21:32:28.484309 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:28 crc kubenswrapper[5014]: E1006 21:32:28.484569 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.538002 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.538092 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.538117 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.538143 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.538163 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:28Z","lastTransitionTime":"2025-10-06T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.641779 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.641884 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.641901 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.641929 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.641949 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:28Z","lastTransitionTime":"2025-10-06T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.745713 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.745782 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.745797 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.745823 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.745841 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:28Z","lastTransitionTime":"2025-10-06T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.849161 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.849227 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.849245 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.849274 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.849293 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:28Z","lastTransitionTime":"2025-10-06T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.951962 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.952015 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.952057 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.952101 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:28 crc kubenswrapper[5014]: I1006 21:32:28.952117 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:28Z","lastTransitionTime":"2025-10-06T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.054988 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.055607 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.055804 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.056029 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.056179 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:29Z","lastTransitionTime":"2025-10-06T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.158998 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.159086 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.159108 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.159143 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.159166 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:29Z","lastTransitionTime":"2025-10-06T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.262689 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.263155 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.263238 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.263339 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.263450 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:29Z","lastTransitionTime":"2025-10-06T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.367070 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.367134 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.367148 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.367173 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.367192 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:29Z","lastTransitionTime":"2025-10-06T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.470141 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.470226 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.470244 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.470272 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.470293 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:29Z","lastTransitionTime":"2025-10-06T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.484222 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:29 crc kubenswrapper[5014]: E1006 21:32:29.484562 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.573590 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.573701 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.573722 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.573756 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.573773 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:29Z","lastTransitionTime":"2025-10-06T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.677059 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.677119 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.677137 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.677160 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.677177 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:29Z","lastTransitionTime":"2025-10-06T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.780728 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.780792 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.780810 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.780835 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.780854 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:29Z","lastTransitionTime":"2025-10-06T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.884547 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.884660 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.884687 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.884716 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.884738 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:29Z","lastTransitionTime":"2025-10-06T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.987879 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.987929 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.987961 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.987991 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:29 crc kubenswrapper[5014]: I1006 21:32:29.988002 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:29Z","lastTransitionTime":"2025-10-06T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.091870 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.091940 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.091964 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.091989 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.092007 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:30Z","lastTransitionTime":"2025-10-06T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.195208 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.195309 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.195328 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.195353 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.195372 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:30Z","lastTransitionTime":"2025-10-06T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.300896 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.300956 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.300971 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.300994 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.301008 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:30Z","lastTransitionTime":"2025-10-06T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.406419 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.406522 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.406544 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.406608 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.406662 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:30Z","lastTransitionTime":"2025-10-06T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.484310 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:30 crc kubenswrapper[5014]: E1006 21:32:30.484581 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.484930 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:30 crc kubenswrapper[5014]: E1006 21:32:30.485155 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.486279 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:30 crc kubenswrapper[5014]: E1006 21:32:30.486453 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.510186 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.510284 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.510311 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.510347 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.510371 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:30Z","lastTransitionTime":"2025-10-06T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.613958 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.614031 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.614054 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.614086 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.614110 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:30Z","lastTransitionTime":"2025-10-06T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.718137 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.718199 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.718217 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.718242 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.718261 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:30Z","lastTransitionTime":"2025-10-06T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.822046 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.822113 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.822136 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.822168 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.822191 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:30Z","lastTransitionTime":"2025-10-06T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.925193 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.925244 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.925260 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.925283 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:30 crc kubenswrapper[5014]: I1006 21:32:30.925302 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:30Z","lastTransitionTime":"2025-10-06T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.028769 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.028901 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.028921 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.028945 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.028962 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:31Z","lastTransitionTime":"2025-10-06T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.133388 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.133851 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.134010 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.134169 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.134307 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:31Z","lastTransitionTime":"2025-10-06T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.238384 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.238974 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.239130 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.239336 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.239520 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:31Z","lastTransitionTime":"2025-10-06T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.343496 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.343579 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.343598 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.343711 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.343737 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:31Z","lastTransitionTime":"2025-10-06T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.447104 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.447182 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.447200 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.447233 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.447255 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:31Z","lastTransitionTime":"2025-10-06T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.485104 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:31 crc kubenswrapper[5014]: E1006 21:32:31.485393 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.551276 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.551359 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.551379 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.551418 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.551438 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:31Z","lastTransitionTime":"2025-10-06T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.654910 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.654973 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.654991 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.655015 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.655032 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:31Z","lastTransitionTime":"2025-10-06T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.758562 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.758680 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.758707 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.758744 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.758769 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:31Z","lastTransitionTime":"2025-10-06T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.861173 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.861235 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.861254 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.861276 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.861293 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:31Z","lastTransitionTime":"2025-10-06T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.963959 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.964041 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.964066 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.964102 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:31 crc kubenswrapper[5014]: I1006 21:32:31.964127 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:31Z","lastTransitionTime":"2025-10-06T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.068014 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.068110 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.068163 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.068188 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.068205 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:32Z","lastTransitionTime":"2025-10-06T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.171081 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.171137 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.171156 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.171181 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.171199 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:32Z","lastTransitionTime":"2025-10-06T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.274750 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.274807 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.274824 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.274850 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.274867 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:32Z","lastTransitionTime":"2025-10-06T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.377712 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.377776 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.377795 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.377818 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.377836 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:32Z","lastTransitionTime":"2025-10-06T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.480961 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.481025 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.481043 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.481068 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.481087 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:32Z","lastTransitionTime":"2025-10-06T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.484261 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.484321 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.484266 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:32 crc kubenswrapper[5014]: E1006 21:32:32.484423 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:32 crc kubenswrapper[5014]: E1006 21:32:32.484589 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:32 crc kubenswrapper[5014]: E1006 21:32:32.484690 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.584490 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.584542 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.584561 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.584586 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.584603 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:32Z","lastTransitionTime":"2025-10-06T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.687277 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.687335 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.687376 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.687402 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.687424 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:32Z","lastTransitionTime":"2025-10-06T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.791470 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.791534 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.791552 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.791576 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.791595 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:32Z","lastTransitionTime":"2025-10-06T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.894867 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.894925 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.894943 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.894966 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.894984 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:32Z","lastTransitionTime":"2025-10-06T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.998951 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.999024 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.999044 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.999071 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:32 crc kubenswrapper[5014]: I1006 21:32:32.999100 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:32Z","lastTransitionTime":"2025-10-06T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.001127 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.001203 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.001229 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.001258 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.001293 5014 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-06T21:32:33Z","lastTransitionTime":"2025-10-06T21:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.071509 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8"] Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.072420 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.076772 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.077011 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.077197 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.080270 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.133369 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-r8bdz" podStartSLOduration=77.133343427 podStartE2EDuration="1m17.133343427s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:32:33.096706328 +0000 UTC m=+98.389743102" watchObservedRunningTime="2025-10-06 21:32:33.133343427 +0000 UTC m=+98.426380191" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.155588 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=78.155554967 podStartE2EDuration="1m18.155554967s" podCreationTimestamp="2025-10-06 21:31:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:32:33.135343865 +0000 UTC m=+98.428380679" watchObservedRunningTime="2025-10-06 21:32:33.155554967 +0000 UTC m=+98.448591741" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 
21:32:33.217844 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=79.217814932 podStartE2EDuration="1m19.217814932s" podCreationTimestamp="2025-10-06 21:31:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:32:33.199472552 +0000 UTC m=+98.492509336" watchObservedRunningTime="2025-10-06 21:32:33.217814932 +0000 UTC m=+98.510851736" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.218453 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podStartSLOduration=78.218443903 podStartE2EDuration="1m18.218443903s" podCreationTimestamp="2025-10-06 21:31:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:32:33.216989125 +0000 UTC m=+98.510025899" watchObservedRunningTime="2025-10-06 21:32:33.218443903 +0000 UTC m=+98.511480687" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.226602 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/26988a26-a84a-4fd2-82a3-a0a24070a59a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-rvhm8\" (UID: \"26988a26-a84a-4fd2-82a3-a0a24070a59a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.226780 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/26988a26-a84a-4fd2-82a3-a0a24070a59a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-rvhm8\" (UID: \"26988a26-a84a-4fd2-82a3-a0a24070a59a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.226872 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/26988a26-a84a-4fd2-82a3-a0a24070a59a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-rvhm8\" (UID: \"26988a26-a84a-4fd2-82a3-a0a24070a59a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.226965 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/26988a26-a84a-4fd2-82a3-a0a24070a59a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-rvhm8\" (UID: \"26988a26-a84a-4fd2-82a3-a0a24070a59a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.227108 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/26988a26-a84a-4fd2-82a3-a0a24070a59a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-rvhm8\" (UID: \"26988a26-a84a-4fd2-82a3-a0a24070a59a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.295199 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=49.295163936 podStartE2EDuration="49.295163936s" podCreationTimestamp="2025-10-06 21:31:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:32:33.29409522 +0000 UTC m=+98.587131994" watchObservedRunningTime="2025-10-06 21:32:33.295163936 +0000 UTC m=+98.588200710" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.328562 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/26988a26-a84a-4fd2-82a3-a0a24070a59a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-rvhm8\" (UID: \"26988a26-a84a-4fd2-82a3-a0a24070a59a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.328737 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/26988a26-a84a-4fd2-82a3-a0a24070a59a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-rvhm8\" (UID: \"26988a26-a84a-4fd2-82a3-a0a24070a59a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.328789 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/26988a26-a84a-4fd2-82a3-a0a24070a59a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-rvhm8\" (UID: \"26988a26-a84a-4fd2-82a3-a0a24070a59a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.328823 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/26988a26-a84a-4fd2-82a3-a0a24070a59a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-rvhm8\" (UID: \"26988a26-a84a-4fd2-82a3-a0a24070a59a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.328860 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/26988a26-a84a-4fd2-82a3-a0a24070a59a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-rvhm8\" (UID: \"26988a26-a84a-4fd2-82a3-a0a24070a59a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.328940 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/26988a26-a84a-4fd2-82a3-a0a24070a59a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-rvhm8\" (UID: \"26988a26-a84a-4fd2-82a3-a0a24070a59a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.328970 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/26988a26-a84a-4fd2-82a3-a0a24070a59a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-rvhm8\" (UID: \"26988a26-a84a-4fd2-82a3-a0a24070a59a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.330399 5014 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/26988a26-a84a-4fd2-82a3-a0a24070a59a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-rvhm8\" (UID: \"26988a26-a84a-4fd2-82a3-a0a24070a59a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.341040 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/26988a26-a84a-4fd2-82a3-a0a24070a59a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-rvhm8\" (UID: \"26988a26-a84a-4fd2-82a3-a0a24070a59a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.358904 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/26988a26-a84a-4fd2-82a3-a0a24070a59a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-rvhm8\" (UID: \"26988a26-a84a-4fd2-82a3-a0a24070a59a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.401295 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.425174 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-rmjtc" podStartSLOduration=78.42514913 podStartE2EDuration="1m18.42514913s" podCreationTimestamp="2025-10-06 21:31:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:32:33.392178276 +0000 UTC m=+98.685215050" watchObservedRunningTime="2025-10-06 21:32:33.42514913 +0000 UTC m=+98.718185894" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.425838 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-8ddbf" podStartSLOduration=78.425829214 podStartE2EDuration="1m18.425829214s" podCreationTimestamp="2025-10-06 21:31:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:32:33.424496888 +0000 UTC m=+98.717533702" watchObservedRunningTime="2025-10-06 21:32:33.425829214 +0000 UTC m=+98.718865978" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.450908 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-gqd7v" podStartSLOduration=78.45088631 podStartE2EDuration="1m18.45088631s" podCreationTimestamp="2025-10-06 21:31:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:32:33.449557565 +0000 UTC m=+98.742594369" watchObservedRunningTime="2025-10-06 21:32:33.45088631 +0000 UTC m=+98.743923074" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.470433 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bj245" podStartSLOduration=77.47040471 podStartE2EDuration="1m17.47040471s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-10-06 21:32:33.467368027 +0000 UTC m=+98.760404771" watchObservedRunningTime="2025-10-06 21:32:33.47040471 +0000 UTC m=+98.763441474" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.483792 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:33 crc kubenswrapper[5014]: E1006 21:32:33.487237 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.508788 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=17.508761627 podStartE2EDuration="17.508761627s" podCreationTimestamp="2025-10-06 21:32:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:32:33.485959606 +0000 UTC m=+98.778996410" watchObservedRunningTime="2025-10-06 21:32:33.508761627 +0000 UTC m=+98.801798391" Oct 06 21:32:33 crc kubenswrapper[5014]: I1006 21:32:33.532437 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=75.532418466 podStartE2EDuration="1m15.532418466s" podCreationTimestamp="2025-10-06 21:31:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:32:33.508996994 +0000 UTC m=+98.802033758" watchObservedRunningTime="2025-10-06 21:32:33.532418466 +0000 UTC m=+98.825455220" Oct 06 21:32:34 crc kubenswrapper[5014]: I1006 21:32:34.147665 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" event={"ID":"26988a26-a84a-4fd2-82a3-a0a24070a59a","Type":"ContainerStarted","Data":"769237c2ff2f7272c66fc4740bd59a8754234ee7a53e0a94f6df59c9494ac607"} Oct 06 21:32:34 crc kubenswrapper[5014]: I1006 21:32:34.149147 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" event={"ID":"26988a26-a84a-4fd2-82a3-a0a24070a59a","Type":"ContainerStarted","Data":"fa090a0c19d76a5689ce997e67fbe5f0a0dfdd01c76f190961f593456f8ca743"} Oct 06 21:32:34 crc kubenswrapper[5014]: I1006 21:32:34.175402 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-rvhm8" podStartSLOduration=79.1753723 podStartE2EDuration="1m19.1753723s" podCreationTimestamp="2025-10-06 21:31:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:32:34.173344641 +0000 UTC m=+99.466381415" watchObservedRunningTime="2025-10-06 21:32:34.1753723 +0000 UTC m=+99.468409074" Oct 06 21:32:34 crc kubenswrapper[5014]: I1006 21:32:34.240352 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs\") pod 
\"network-metrics-daemon-chcf6\" (UID: \"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\") " pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:34 crc kubenswrapper[5014]: E1006 21:32:34.240647 5014 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:32:34 crc kubenswrapper[5014]: E1006 21:32:34.241001 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs podName:e4dbffab-5f6a-4ba5-b0c3-68e7e8840621 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:38.240970756 +0000 UTC m=+163.534007530 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs") pod "network-metrics-daemon-chcf6" (UID: "e4dbffab-5f6a-4ba5-b0c3-68e7e8840621") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 06 21:32:34 crc kubenswrapper[5014]: I1006 21:32:34.483609 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:34 crc kubenswrapper[5014]: I1006 21:32:34.483675 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:34 crc kubenswrapper[5014]: E1006 21:32:34.483830 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:34 crc kubenswrapper[5014]: E1006 21:32:34.484011 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:34 crc kubenswrapper[5014]: I1006 21:32:34.484396 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:34 crc kubenswrapper[5014]: E1006 21:32:34.484685 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:35 crc kubenswrapper[5014]: I1006 21:32:35.484546 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:35 crc kubenswrapper[5014]: E1006 21:32:35.486855 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:36 crc kubenswrapper[5014]: I1006 21:32:36.484211 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:36 crc kubenswrapper[5014]: E1006 21:32:36.484383 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:36 crc kubenswrapper[5014]: I1006 21:32:36.484471 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:36 crc kubenswrapper[5014]: E1006 21:32:36.484551 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:36 crc kubenswrapper[5014]: I1006 21:32:36.484614 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:36 crc kubenswrapper[5014]: E1006 21:32:36.484731 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:37 crc kubenswrapper[5014]: I1006 21:32:37.483957 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:37 crc kubenswrapper[5014]: E1006 21:32:37.484187 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:38 crc kubenswrapper[5014]: I1006 21:32:38.483940 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:38 crc kubenswrapper[5014]: I1006 21:32:38.483983 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:38 crc kubenswrapper[5014]: I1006 21:32:38.484090 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:38 crc kubenswrapper[5014]: E1006 21:32:38.484389 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:38 crc kubenswrapper[5014]: E1006 21:32:38.485522 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:38 crc kubenswrapper[5014]: E1006 21:32:38.485684 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:38 crc kubenswrapper[5014]: I1006 21:32:38.486415 5014 scope.go:117] "RemoveContainer" containerID="f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828" Oct 06 21:32:38 crc kubenswrapper[5014]: E1006 21:32:38.486759 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" Oct 06 21:32:39 crc kubenswrapper[5014]: I1006 21:32:39.484561 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:39 crc kubenswrapper[5014]: E1006 21:32:39.484807 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:40 crc kubenswrapper[5014]: I1006 21:32:40.483997 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:40 crc kubenswrapper[5014]: I1006 21:32:40.484041 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:40 crc kubenswrapper[5014]: I1006 21:32:40.484021 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:40 crc kubenswrapper[5014]: E1006 21:32:40.484184 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:40 crc kubenswrapper[5014]: E1006 21:32:40.484515 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:40 crc kubenswrapper[5014]: E1006 21:32:40.484744 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:41 crc kubenswrapper[5014]: I1006 21:32:41.483540 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:41 crc kubenswrapper[5014]: E1006 21:32:41.483753 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:42 crc kubenswrapper[5014]: I1006 21:32:42.484118 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:42 crc kubenswrapper[5014]: I1006 21:32:42.484188 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:42 crc kubenswrapper[5014]: E1006 21:32:42.484302 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:42 crc kubenswrapper[5014]: I1006 21:32:42.484426 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:42 crc kubenswrapper[5014]: E1006 21:32:42.484683 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:42 crc kubenswrapper[5014]: E1006 21:32:42.491166 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:43 crc kubenswrapper[5014]: I1006 21:32:43.484142 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:43 crc kubenswrapper[5014]: E1006 21:32:43.484362 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:44 crc kubenswrapper[5014]: I1006 21:32:44.483948 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:44 crc kubenswrapper[5014]: I1006 21:32:44.483982 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:44 crc kubenswrapper[5014]: I1006 21:32:44.484049 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:44 crc kubenswrapper[5014]: E1006 21:32:44.484134 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:44 crc kubenswrapper[5014]: E1006 21:32:44.484302 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:44 crc kubenswrapper[5014]: E1006 21:32:44.484358 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:45 crc kubenswrapper[5014]: I1006 21:32:45.483986 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:45 crc kubenswrapper[5014]: E1006 21:32:45.485264 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:46 crc kubenswrapper[5014]: I1006 21:32:46.483722 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:46 crc kubenswrapper[5014]: I1006 21:32:46.483828 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:46 crc kubenswrapper[5014]: E1006 21:32:46.483923 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:46 crc kubenswrapper[5014]: I1006 21:32:46.483987 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:46 crc kubenswrapper[5014]: E1006 21:32:46.484131 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:46 crc kubenswrapper[5014]: E1006 21:32:46.484429 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:47 crc kubenswrapper[5014]: I1006 21:32:47.484077 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:47 crc kubenswrapper[5014]: E1006 21:32:47.484681 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:48 crc kubenswrapper[5014]: I1006 21:32:48.483961 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:48 crc kubenswrapper[5014]: I1006 21:32:48.484013 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:48 crc kubenswrapper[5014]: I1006 21:32:48.483968 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:48 crc kubenswrapper[5014]: E1006 21:32:48.484227 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:48 crc kubenswrapper[5014]: E1006 21:32:48.484405 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:48 crc kubenswrapper[5014]: E1006 21:32:48.484527 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:49 crc kubenswrapper[5014]: I1006 21:32:49.483753 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:49 crc kubenswrapper[5014]: E1006 21:32:49.483943 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:50 crc kubenswrapper[5014]: I1006 21:32:50.210514 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8ddbf_9f1464a5-d713-4f79-8248-33c69abcdac2/kube-multus/1.log" Oct 06 21:32:50 crc kubenswrapper[5014]: I1006 21:32:50.211166 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8ddbf_9f1464a5-d713-4f79-8248-33c69abcdac2/kube-multus/0.log" Oct 06 21:32:50 crc kubenswrapper[5014]: I1006 21:32:50.211210 5014 generic.go:334] "Generic (PLEG): container finished" podID="9f1464a5-d713-4f79-8248-33c69abcdac2" containerID="c0cf1ba6616443cb4de3d3036bcacb1b1672bcd98fc50e2162c40b5fdfcb7583" exitCode=1 Oct 06 21:32:50 crc kubenswrapper[5014]: I1006 21:32:50.211241 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8ddbf" event={"ID":"9f1464a5-d713-4f79-8248-33c69abcdac2","Type":"ContainerDied","Data":"c0cf1ba6616443cb4de3d3036bcacb1b1672bcd98fc50e2162c40b5fdfcb7583"} Oct 06 21:32:50 crc kubenswrapper[5014]: I1006 21:32:50.211273 5014 scope.go:117] "RemoveContainer" containerID="7fb4425e6883a48fbe54f2584af0dd5ee48adaec857315010d29f61af54f49f4" Oct 06 21:32:50 crc kubenswrapper[5014]: I1006 21:32:50.212074 5014 scope.go:117] "RemoveContainer" containerID="c0cf1ba6616443cb4de3d3036bcacb1b1672bcd98fc50e2162c40b5fdfcb7583" Oct 06 21:32:50 crc kubenswrapper[5014]: E1006 21:32:50.212453 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-8ddbf_openshift-multus(9f1464a5-d713-4f79-8248-33c69abcdac2)\"" pod="openshift-multus/multus-8ddbf" podUID="9f1464a5-d713-4f79-8248-33c69abcdac2" Oct 06 21:32:50 crc kubenswrapper[5014]: I1006 21:32:50.483781 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:50 crc kubenswrapper[5014]: I1006 21:32:50.483815 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:50 crc kubenswrapper[5014]: I1006 21:32:50.483971 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:50 crc kubenswrapper[5014]: E1006 21:32:50.484122 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:50 crc kubenswrapper[5014]: E1006 21:32:50.484335 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:50 crc kubenswrapper[5014]: E1006 21:32:50.484860 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:50 crc kubenswrapper[5014]: I1006 21:32:50.485278 5014 scope.go:117] "RemoveContainer" containerID="f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828" Oct 06 21:32:50 crc kubenswrapper[5014]: E1006 21:32:50.485519 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2wj75_openshift-ovn-kubernetes(5d2de4ac-a423-4f5a-904a-817553f204f6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" Oct 06 21:32:51 crc kubenswrapper[5014]: I1006 21:32:51.216459 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8ddbf_9f1464a5-d713-4f79-8248-33c69abcdac2/kube-multus/1.log" Oct 06 21:32:51 crc kubenswrapper[5014]: I1006 21:32:51.484229 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:51 crc kubenswrapper[5014]: E1006 21:32:51.484410 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:52 crc kubenswrapper[5014]: I1006 21:32:52.483800 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:52 crc kubenswrapper[5014]: I1006 21:32:52.483814 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:52 crc kubenswrapper[5014]: E1006 21:32:52.484085 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:52 crc kubenswrapper[5014]: I1006 21:32:52.483834 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:52 crc kubenswrapper[5014]: E1006 21:32:52.484251 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:52 crc kubenswrapper[5014]: E1006 21:32:52.484389 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:53 crc kubenswrapper[5014]: I1006 21:32:53.484080 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:53 crc kubenswrapper[5014]: E1006 21:32:53.484336 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:54 crc kubenswrapper[5014]: I1006 21:32:54.483739 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:54 crc kubenswrapper[5014]: I1006 21:32:54.483752 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:54 crc kubenswrapper[5014]: E1006 21:32:54.483940 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:54 crc kubenswrapper[5014]: E1006 21:32:54.484082 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:54 crc kubenswrapper[5014]: I1006 21:32:54.484903 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:54 crc kubenswrapper[5014]: E1006 21:32:54.485135 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:55 crc kubenswrapper[5014]: E1006 21:32:55.438650 5014 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Oct 06 21:32:55 crc kubenswrapper[5014]: I1006 21:32:55.483766 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:55 crc kubenswrapper[5014]: E1006 21:32:55.486350 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:55 crc kubenswrapper[5014]: E1006 21:32:55.597972 5014 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Oct 06 21:32:56 crc kubenswrapper[5014]: I1006 21:32:56.483927 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:56 crc kubenswrapper[5014]: I1006 21:32:56.483975 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:56 crc kubenswrapper[5014]: I1006 21:32:56.484018 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:56 crc kubenswrapper[5014]: E1006 21:32:56.484126 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:56 crc kubenswrapper[5014]: E1006 21:32:56.484269 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:56 crc kubenswrapper[5014]: E1006 21:32:56.484422 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:57 crc kubenswrapper[5014]: I1006 21:32:57.483700 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:57 crc kubenswrapper[5014]: E1006 21:32:57.483882 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:32:58 crc kubenswrapper[5014]: I1006 21:32:58.483884 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:32:58 crc kubenswrapper[5014]: I1006 21:32:58.483960 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:32:58 crc kubenswrapper[5014]: I1006 21:32:58.483883 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:32:58 crc kubenswrapper[5014]: E1006 21:32:58.484044 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:32:58 crc kubenswrapper[5014]: E1006 21:32:58.484387 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:32:58 crc kubenswrapper[5014]: E1006 21:32:58.484375 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:32:59 crc kubenswrapper[5014]: I1006 21:32:59.483727 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:32:59 crc kubenswrapper[5014]: E1006 21:32:59.483972 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:33:00 crc kubenswrapper[5014]: I1006 21:33:00.484312 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:33:00 crc kubenswrapper[5014]: I1006 21:33:00.484424 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:33:00 crc kubenswrapper[5014]: E1006 21:33:00.484542 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:33:00 crc kubenswrapper[5014]: I1006 21:33:00.484363 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:33:00 crc kubenswrapper[5014]: E1006 21:33:00.484773 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:33:00 crc kubenswrapper[5014]: E1006 21:33:00.484895 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:33:00 crc kubenswrapper[5014]: E1006 21:33:00.599580 5014 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Oct 06 21:33:01 crc kubenswrapper[5014]: I1006 21:33:01.484509 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:33:01 crc kubenswrapper[5014]: E1006 21:33:01.484718 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:33:02 crc kubenswrapper[5014]: I1006 21:33:02.483773 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:33:02 crc kubenswrapper[5014]: E1006 21:33:02.483944 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:33:02 crc kubenswrapper[5014]: I1006 21:33:02.483800 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:33:02 crc kubenswrapper[5014]: I1006 21:33:02.484024 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:33:02 crc kubenswrapper[5014]: E1006 21:33:02.484060 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:33:02 crc kubenswrapper[5014]: E1006 21:33:02.484212 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:33:03 crc kubenswrapper[5014]: I1006 21:33:03.484240 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:33:03 crc kubenswrapper[5014]: E1006 21:33:03.484493 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:33:04 crc kubenswrapper[5014]: I1006 21:33:04.484073 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:33:04 crc kubenswrapper[5014]: I1006 21:33:04.484139 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:33:04 crc kubenswrapper[5014]: E1006 21:33:04.484305 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:33:04 crc kubenswrapper[5014]: I1006 21:33:04.484346 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:33:04 crc kubenswrapper[5014]: E1006 21:33:04.484586 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:33:04 crc kubenswrapper[5014]: E1006 21:33:04.484730 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:33:04 crc kubenswrapper[5014]: I1006 21:33:04.485764 5014 scope.go:117] "RemoveContainer" containerID="f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828" Oct 06 21:33:05 crc kubenswrapper[5014]: I1006 21:33:05.274061 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/3.log" Oct 06 21:33:05 crc kubenswrapper[5014]: I1006 21:33:05.276863 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerStarted","Data":"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f"} Oct 06 21:33:05 crc kubenswrapper[5014]: I1006 21:33:05.278411 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:33:05 crc kubenswrapper[5014]: I1006 21:33:05.483651 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:33:05 crc kubenswrapper[5014]: I1006 21:33:05.486740 5014 scope.go:117] "RemoveContainer" containerID="c0cf1ba6616443cb4de3d3036bcacb1b1672bcd98fc50e2162c40b5fdfcb7583" Oct 06 21:33:05 crc kubenswrapper[5014]: E1006 21:33:05.486438 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:33:05 crc kubenswrapper[5014]: I1006 21:33:05.508041 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podStartSLOduration=110.507995835 podStartE2EDuration="1m50.507995835s" podCreationTimestamp="2025-10-06 21:31:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:05.313037115 +0000 UTC m=+130.606073849" watchObservedRunningTime="2025-10-06 21:33:05.507995835 +0000 UTC m=+130.801032609" Oct 06 21:33:05 crc kubenswrapper[5014]: I1006 21:33:05.552072 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-chcf6"] Oct 06 21:33:05 crc kubenswrapper[5014]: E1006 21:33:05.600738 5014 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Oct 06 21:33:06 crc kubenswrapper[5014]: I1006 21:33:06.283997 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8ddbf_9f1464a5-d713-4f79-8248-33c69abcdac2/kube-multus/1.log" Oct 06 21:33:06 crc kubenswrapper[5014]: I1006 21:33:06.284127 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:33:06 crc kubenswrapper[5014]: I1006 21:33:06.284149 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8ddbf" event={"ID":"9f1464a5-d713-4f79-8248-33c69abcdac2","Type":"ContainerStarted","Data":"dbd4cf9bbd8472079722bf34bcdf563ce5e2cf12258cf52892b40da6cea24571"} Oct 06 21:33:06 crc kubenswrapper[5014]: E1006 21:33:06.284283 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:33:06 crc kubenswrapper[5014]: I1006 21:33:06.484038 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:33:06 crc kubenswrapper[5014]: E1006 21:33:06.484912 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:33:06 crc kubenswrapper[5014]: I1006 21:33:06.484256 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:33:06 crc kubenswrapper[5014]: E1006 21:33:06.485077 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:33:06 crc kubenswrapper[5014]: I1006 21:33:06.484186 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:33:06 crc kubenswrapper[5014]: E1006 21:33:06.485242 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:33:08 crc kubenswrapper[5014]: I1006 21:33:08.483762 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:33:08 crc kubenswrapper[5014]: I1006 21:33:08.483908 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:33:08 crc kubenswrapper[5014]: I1006 21:33:08.484032 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:33:08 crc kubenswrapper[5014]: E1006 21:33:08.484141 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:33:08 crc kubenswrapper[5014]: E1006 21:33:08.484293 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:33:08 crc kubenswrapper[5014]: I1006 21:33:08.484400 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:33:08 crc kubenswrapper[5014]: E1006 21:33:08.484496 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:33:08 crc kubenswrapper[5014]: E1006 21:33:08.485054 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:33:10 crc kubenswrapper[5014]: I1006 21:33:10.484310 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:33:10 crc kubenswrapper[5014]: I1006 21:33:10.484389 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:33:10 crc kubenswrapper[5014]: I1006 21:33:10.484445 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:33:10 crc kubenswrapper[5014]: E1006 21:33:10.484549 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-chcf6" podUID="e4dbffab-5f6a-4ba5-b0c3-68e7e8840621" Oct 06 21:33:10 crc kubenswrapper[5014]: I1006 21:33:10.484579 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:33:10 crc kubenswrapper[5014]: E1006 21:33:10.484802 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 06 21:33:10 crc kubenswrapper[5014]: E1006 21:33:10.484981 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 06 21:33:10 crc kubenswrapper[5014]: E1006 21:33:10.485162 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 06 21:33:12 crc kubenswrapper[5014]: I1006 21:33:12.484328 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:33:12 crc kubenswrapper[5014]: I1006 21:33:12.484431 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:33:12 crc kubenswrapper[5014]: I1006 21:33:12.484326 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:33:12 crc kubenswrapper[5014]: I1006 21:33:12.484349 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:33:12 crc kubenswrapper[5014]: I1006 21:33:12.486577 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Oct 06 21:33:12 crc kubenswrapper[5014]: I1006 21:33:12.486968 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Oct 06 21:33:12 crc kubenswrapper[5014]: I1006 21:33:12.487373 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Oct 06 21:33:12 crc kubenswrapper[5014]: I1006 21:33:12.487692 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Oct 06 21:33:12 crc kubenswrapper[5014]: I1006 21:33:12.488464 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Oct 06 21:33:12 crc kubenswrapper[5014]: I1006 21:33:12.489575 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.015238 5014 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.110063 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pc5xx"] Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.110948 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.111557 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-297xp"] Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.112032 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.113288 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp"] Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.113716 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.114095 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.114746 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lts6v"] Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.115116 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.115254 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lts6v" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.115931 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.116178 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.116778 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.116962 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.117160 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.117274 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-wd97j"] Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.117764 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-wd97j" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.118355 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx"] Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.118929 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.121262 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-s45dn"] Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.122134 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-vdpkd"] Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.122239 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-s45dn" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.123348 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv"] Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.123429 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.124709 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.125665 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq"] Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.126374 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.127182 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.127597 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.128011 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.128896 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.129194 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.130803 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.131338 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.131871 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.132018 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.132355 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.132407 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.132363 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.132736 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.132915 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.133885 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.134325 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.134605 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.135522 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.144226 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.144931 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.135649 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.146497 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.147963 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.148111 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.148738 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.149404 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.151141 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.151401 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.151423 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.152412 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.152452 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.157977 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.158156 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.161981 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-t2756"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.162593 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.162605 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.162827 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.162921 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.163138 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.163288 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.163593 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.162925 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.164130 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.164192 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.164206 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.164227 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.164234 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.164150 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.164451 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.164752 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.164832 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.165030 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.165087 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.165045 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.165180 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.165227 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.165278 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.165433 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.165464 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.165517 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.165699 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.165797 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.165986 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.166684 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.168381 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-dq58g"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.169045 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.170033 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.170146 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.170432 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.170595 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.170659 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.171042 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.171277 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.171472 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.172402 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.174523 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.174676 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-6kdh6"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.175076 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.175328 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.175515 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.175649 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.175677 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-6kdh6"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.176333 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-kb2p5"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.176542 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.176911 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-kb2p5"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.185429 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.185673 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.185773 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.185947 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.186178 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.186294 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.186319 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.186448 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.186605 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.186607 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.186652 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.186687 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.188408 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.188936 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-zh4s7"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.189368 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.189375 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-zh4s7"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.189407 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.196634 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.198412 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.199072 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.199191 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.199401 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.199655 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.199679 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.199781 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.199822 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.199866 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.199889 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.199992 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.200602 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.200685 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.199783 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.200859 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.200918 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.201530 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.202913 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/04070324-674e-4785-aada-ad9ffe6e89c8-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-lts6v\" (UID: \"04070324-674e-4785-aada-ad9ffe6e89c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-lts6v"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.203924 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48bcee5f-3c11-4784-aa10-c5673058c7b1-serving-cert\") pod \"controller-manager-879f6c89f-pc5xx\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.204020 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sz9zw\" (UniqueName: \"kubernetes.io/projected/48bcee5f-3c11-4784-aa10-c5673058c7b1-kube-api-access-sz9zw\") pod \"controller-manager-879f6c89f-pc5xx\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.204067 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-pc5xx\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.204317 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/04070324-674e-4785-aada-ad9ffe6e89c8-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-lts6v\" (UID: \"04070324-674e-4785-aada-ad9ffe6e89c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-lts6v"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.204580 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-client-ca\") pod \"controller-manager-879f6c89f-pc5xx\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.204628 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-config\") pod \"controller-manager-879f6c89f-pc5xx\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.205271 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7jjh\" (UniqueName: \"kubernetes.io/projected/04070324-674e-4785-aada-ad9ffe6e89c8-kube-api-access-x7jjh\") pod \"marketplace-operator-79b997595-lts6v\" (UID: \"04070324-674e-4785-aada-ad9ffe6e89c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-lts6v"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.206966 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.207288 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-pxwc5"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.233579 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.233963 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.234202 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-pxwc5"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.236823 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.238264 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-77xrv"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.238524 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-zvv9h"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.238810 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-n59hv"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.238914 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.238997 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-77xrv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.239222 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.239440 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.239705 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n59hv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.240657 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.240744 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.243512 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-wqnvh"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.244284 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-wqnvh"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.250300 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zjxxb"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.250926 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-zjxxb"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.251014 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.251772 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.253556 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.257291 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.257900 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.261176 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-v74l6"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.261820 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-v74l6"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.265061 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.265833 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-ffz9s"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.266312 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-297xp"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.266331 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lts6v"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.266404 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.266446 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.267176 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-wd97j"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.268729 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pc5xx"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.270800 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-6lc74"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.271269 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-vdpkd"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.271327 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-6lc74"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.272092 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.272884 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.273202 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.274391 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.275581 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.278299 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.279442 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.280835 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-pxwc5"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.282633 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-zvv9h"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.283913 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.286217 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.287281 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-zh4s7"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.288516 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.289894 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.290954 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.292049 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.292849 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.293642 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.299353 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-6kdh6"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.299399 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.300437 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-dq58g"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.302874 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-kb2p5"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.303986 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.305159 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306225 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-97wsg"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306587 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/627924fa-cfa6-404d-91a0-fbf6505ce05b-signing-key\") pod \"service-ca-9c57cc56f-wd97j\" (UID: \"627924fa-cfa6-404d-91a0-fbf6505ce05b\") " pod="openshift-service-ca/service-ca-9c57cc56f-wd97j"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306639 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/65d0615f-167e-462b-b846-cee104cdeec4-proxy-tls\") pod \"machine-config-operator-74547568cd-8pkfv\" (UID: \"65d0615f-167e-462b-b846-cee104cdeec4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306662 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d3d88c45-8fdc-407e-a537-c45a05dedc4b-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306679 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrcn2\" (UniqueName: \"kubernetes.io/projected/7a0e9e21-4f5b-4e63-b976-1f16d01f357c-kube-api-access-qrcn2\") pod \"machine-config-controller-84d6567774-zjlcj\" (UID: \"7a0e9e21-4f5b-4e63-b976-1f16d01f357c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306695 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/15ca2d23-bdd0-4951-aeb5-6544fd2e43d5-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-cl7xx\" (UID: \"15ca2d23-bdd0-4951-aeb5-6544fd2e43d5\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306712 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7hcb\" (UniqueName: \"kubernetes.io/projected/65d0615f-167e-462b-b846-cee104cdeec4-kube-api-access-p7hcb\") pod \"machine-config-operator-74547568cd-8pkfv\" (UID: \"65d0615f-167e-462b-b846-cee104cdeec4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306728 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3693c72b-1c6a-4362-bda2-6d5ea365cd38-installation-pull-secrets\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306744 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/800f445b-95a6-4098-bf5f-ea91a7ead3d0-service-ca-bundle\") pod \"router-default-5444994796-s45dn\" (UID: \"800f445b-95a6-4098-bf5f-ea91a7ead3d0\") " pod="openshift-ingress/router-default-5444994796-s45dn"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306761 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a8703611-a93c-4633-8264-4c3ce3eaf77e-audit\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306778 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a8703611-a93c-4633-8264-4c3ce3eaf77e-etcd-serving-ca\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306793 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9aabe83-7840-47cc-b3d5-72b068737094-config\") pod \"route-controller-manager-6576b87f9c-qnjng\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306819 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-pc5xx\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306835 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a8703611-a93c-4633-8264-4c3ce3eaf77e-image-import-ca\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306848 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a8703611-a93c-4633-8264-4c3ce3eaf77e-trusted-ca-bundle\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306871 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjjjw\" (UniqueName: \"kubernetes.io/projected/627924fa-cfa6-404d-91a0-fbf6505ce05b-kube-api-access-jjjjw\") pod \"service-ca-9c57cc56f-wd97j\" (UID: \"627924fa-cfa6-404d-91a0-fbf6505ce05b\") " pod="openshift-service-ca/service-ca-9c57cc56f-wd97j"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306886 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5474w\" (UniqueName: \"kubernetes.io/projected/d3d88c45-8fdc-407e-a537-c45a05dedc4b-kube-api-access-5474w\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306901 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fbda3f6d-582e-4290-8939-e06c2f971f0e-serving-cert\") pod \"openshift-config-operator-7777fb866f-6dcxq\" (UID: \"fbda3f6d-582e-4290-8939-e06c2f971f0e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306916 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d3d88c45-8fdc-407e-a537-c45a05dedc4b-audit-dir\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306935 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306951 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6xrf\" (UniqueName: \"kubernetes.io/projected/a8703611-a93c-4633-8264-4c3ce3eaf77e-kube-api-access-f6xrf\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306964 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-97wsg"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.306968 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsj5g\" (UniqueName: \"kubernetes.io/projected/b9aabe83-7840-47cc-b3d5-72b068737094-kube-api-access-rsj5g\") pod \"route-controller-manager-6576b87f9c-qnjng\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307212 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-config\") pod \"controller-manager-879f6c89f-pc5xx\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307233 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d3d88c45-8fdc-407e-a537-c45a05dedc4b-audit-policies\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307256 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7jjh\" (UniqueName: \"kubernetes.io/projected/04070324-674e-4785-aada-ad9ffe6e89c8-kube-api-access-x7jjh\") pod \"marketplace-operator-79b997595-lts6v\" (UID: \"04070324-674e-4785-aada-ad9ffe6e89c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-lts6v"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307275 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6rq5\" (UniqueName: \"kubernetes.io/projected/15ca2d23-bdd0-4951-aeb5-6544fd2e43d5-kube-api-access-w6rq5\") pod \"package-server-manager-789f6589d5-cl7xx\" (UID: \"15ca2d23-bdd0-4951-aeb5-6544fd2e43d5\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307293 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/854e3c81-1547-47c9-b15c-2f226239301d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-kgppv\" (UID: \"854e3c81-1547-47c9-b15c-2f226239301d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv"
Oct 06 21:33:14 crc kubenswrapper[5014]: E1006 21:33:14.307302 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:14.807291021 +0000 UTC m=+140.100327755 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307322 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/65d0615f-167e-462b-b846-cee104cdeec4-auth-proxy-config\") pod \"machine-config-operator-74547568cd-8pkfv\" (UID: \"65d0615f-167e-462b-b846-cee104cdeec4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307339 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a8703611-a93c-4633-8264-4c3ce3eaf77e-node-pullsecrets\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307356 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-registry-tls\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307371 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/800f445b-95a6-4098-bf5f-ea91a7ead3d0-default-certificate\") pod \"router-default-5444994796-s45dn\" (UID: \"800f445b-95a6-4098-bf5f-ea91a7ead3d0\") " pod="openshift-ingress/router-default-5444994796-s45dn"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307386 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6k4t\" (UniqueName: \"kubernetes.io/projected/800f445b-95a6-4098-bf5f-ea91a7ead3d0-kube-api-access-p6k4t\") pod \"router-default-5444994796-s45dn\" (UID: \"800f445b-95a6-4098-bf5f-ea91a7ead3d0\") " pod="openshift-ingress/router-default-5444994796-s45dn"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307403 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-bound-sa-token\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307417 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a8703611-a93c-4633-8264-4c3ce3eaf77e-audit-dir\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307434 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/04070324-674e-4785-aada-ad9ffe6e89c8-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-lts6v\" (UID: \"04070324-674e-4785-aada-ad9ffe6e89c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-lts6v"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307448 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48bcee5f-3c11-4784-aa10-c5673058c7b1-serving-cert\") pod \"controller-manager-879f6c89f-pc5xx\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307472 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2qpv\" (UniqueName: \"kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-kube-api-access-q2qpv\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307487 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d3d88c45-8fdc-407e-a537-c45a05dedc4b-encryption-config\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307505 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/fbda3f6d-582e-4290-8939-e06c2f971f0e-available-featuregates\") pod \"openshift-config-operator-7777fb866f-6dcxq\" (UID: \"fbda3f6d-582e-4290-8939-e06c2f971f0e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307520 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/65d0615f-167e-462b-b846-cee104cdeec4-images\") pod \"machine-config-operator-74547568cd-8pkfv\" (UID: \"65d0615f-167e-462b-b846-cee104cdeec4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307533 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a8703611-a93c-4633-8264-4c3ce3eaf77e-encryption-config\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307547 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b9aabe83-7840-47cc-b3d5-72b068737094-client-ca\") pod \"route-controller-manager-6576b87f9c-qnjng\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307569 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sz9zw\" (UniqueName: \"kubernetes.io/projected/48bcee5f-3c11-4784-aa10-c5673058c7b1-kube-api-access-sz9zw\") pod \"controller-manager-879f6c89f-pc5xx\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307586 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9f5n\" (UniqueName: \"kubernetes.io/projected/854e3c81-1547-47c9-b15c-2f226239301d-kube-api-access-w9f5n\") pod \"cluster-image-registry-operator-dc59b4c8b-kgppv\" (UID: \"854e3c81-1547-47c9-b15c-2f226239301d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307603 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a8703611-a93c-4633-8264-4c3ce3eaf77e-serving-cert\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307633 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9aabe83-7840-47cc-b3d5-72b068737094-serving-cert\") pod \"route-controller-manager-6576b87f9c-qnjng\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307648 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/854e3c81-1547-47c9-b15c-2f226239301d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-kgppv\" (UID: \"854e3c81-1547-47c9-b15c-2f226239301d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307664 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/627924fa-cfa6-404d-91a0-fbf6505ce05b-signing-cabundle\") pod \"service-ca-9c57cc56f-wd97j\" (UID: \"627924fa-cfa6-404d-91a0-fbf6505ce05b\") " pod="openshift-service-ca/service-ca-9c57cc56f-wd97j"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307687 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/04070324-674e-4785-aada-ad9ffe6e89c8-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-lts6v\" (UID: \"04070324-674e-4785-aada-ad9ffe6e89c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-lts6v"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307704 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3693c72b-1c6a-4362-bda2-6d5ea365cd38-ca-trust-extracted\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307722 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8703611-a93c-4633-8264-4c3ce3eaf77e-config\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307739 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d3d88c45-8fdc-407e-a537-c45a05dedc4b-etcd-client\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307758 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d3d88c45-8fdc-407e-a537-c45a05dedc4b-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307775 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/854e3c81-1547-47c9-b15c-2f226239301d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-kgppv\" (UID: \"854e3c81-1547-47c9-b15c-2f226239301d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307792 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-client-ca\") pod \"controller-manager-879f6c89f-pc5xx\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307810 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3693c72b-1c6a-4362-bda2-6d5ea365cd38-registry-certificates\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307827 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3693c72b-1c6a-4362-bda2-6d5ea365cd38-trusted-ca\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307842 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/800f445b-95a6-4098-bf5f-ea91a7ead3d0-stats-auth\") pod \"router-default-5444994796-s45dn\" (UID: \"800f445b-95a6-4098-bf5f-ea91a7ead3d0\") " pod="openshift-ingress/router-default-5444994796-s45dn"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307857 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/800f445b-95a6-4098-bf5f-ea91a7ead3d0-metrics-certs\") pod \"router-default-5444994796-s45dn\" (UID: \"800f445b-95a6-4098-bf5f-ea91a7ead3d0\") " pod="openshift-ingress/router-default-5444994796-s45dn"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307874 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3d88c45-8fdc-407e-a537-c45a05dedc4b-serving-cert\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307893 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7a0e9e21-4f5b-4e63-b976-1f16d01f357c-proxy-tls\") pod \"machine-config-controller-84d6567774-zjlcj\" (UID: \"7a0e9e21-4f5b-4e63-b976-1f16d01f357c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307910 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7a0e9e21-4f5b-4e63-b976-1f16d01f357c-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-zjlcj\" (UID: \"7a0e9e21-4f5b-4e63-b976-1f16d01f357c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307924 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a8703611-a93c-4633-8264-4c3ce3eaf77e-etcd-client\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.307941 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w75jv\" (UniqueName: \"kubernetes.io/projected/fbda3f6d-582e-4290-8939-e06c2f971f0e-kube-api-access-w75jv\") pod \"openshift-config-operator-7777fb866f-6dcxq\" (UID: \"fbda3f6d-582e-4290-8939-e06c2f971f0e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.308105 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-pc5xx\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.308758 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-n59hv"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.308892 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.309199 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-config\") pod \"controller-manager-879f6c89f-pc5xx\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.315888 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/04070324-674e-4785-aada-ad9ffe6e89c8-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-lts6v\" (UID: \"04070324-674e-4785-aada-ad9ffe6e89c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-lts6v"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.316684 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-client-ca\") pod \"controller-manager-879f6c89f-pc5xx\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.317471 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-ffz9s"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.318357 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/04070324-674e-4785-aada-ad9ffe6e89c8-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-lts6v\" (UID: \"04070324-674e-4785-aada-ad9ffe6e89c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-lts6v"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.318988 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-wqnvh"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.319332 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48bcee5f-3c11-4784-aa10-c5673058c7b1-serving-cert\") pod \"controller-manager-879f6c89f-pc5xx\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.321939 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zjxxb"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.322636 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.323645 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-77xrv"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.324999 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-97wsg"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.326176 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.327471 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-v74l6"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.328478 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-k27df"]
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.329478 5014 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.330601 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-f5cd4"] Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.330997 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-f5cd4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.335503 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.339050 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.341632 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-f5cd4"] Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.341721 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-k27df"] Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.352696 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.373502 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.392189 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.408447 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:14 crc kubenswrapper[5014]: E1006 21:33:14.408663 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:14.908637836 +0000 UTC m=+140.201674570 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.408713 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/854e3c81-1547-47c9-b15c-2f226239301d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-kgppv\" (UID: \"854e3c81-1547-47c9-b15c-2f226239301d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.408754 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/bda928e7-be85-4f32-be06-049b116323d9-certs\") pod \"machine-config-server-6lc74\" (UID: \"bda928e7-be85-4f32-be06-049b116323d9\") " pod="openshift-machine-config-operator/machine-config-server-6lc74" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.408776 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.408795 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f7160d37-518f-49e6-aee8-ce14c3267c54-images\") pod \"machine-api-operator-5694c8668f-ffz9s\" (UID: \"f7160d37-518f-49e6-aee8-ce14c3267c54\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.408817 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8703611-a93c-4633-8264-4c3ce3eaf77e-config\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.408899 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d3d88c45-8fdc-407e-a537-c45a05dedc4b-etcd-client\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.408920 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d3d88c45-8fdc-407e-a537-c45a05dedc4b-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.408938 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/854e3c81-1547-47c9-b15c-2f226239301d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-kgppv\" (UID: \"854e3c81-1547-47c9-b15c-2f226239301d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.408961 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3693c72b-1c6a-4362-bda2-6d5ea365cd38-ca-trust-extracted\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.409465 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3693c72b-1c6a-4362-bda2-6d5ea365cd38-ca-trust-extracted\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.409492 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/537a285f-ff1e-4ae8-954d-0ff696453b85-srv-cert\") pod \"olm-operator-6b444d44fb-gd58z\" (UID: \"537a285f-ff1e-4ae8-954d-0ff696453b85\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.409520 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.409538 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.409542 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d3d88c45-8fdc-407e-a537-c45a05dedc4b-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.409559 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ce11448-b517-411b-8bf1-8494fc43116e-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-tjwg2\" (UID: \"9ce11448-b517-411b-8bf1-8494fc43116e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.409614 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/a8703611-a93c-4633-8264-4c3ce3eaf77e-config\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.409577 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2f6a5a5c-5106-44e0-9579-ce54200179b1-apiservice-cert\") pod \"packageserver-d55dfcdfc-99ns4\" (UID: \"2f6a5a5c-5106-44e0-9579-ce54200179b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.409802 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp7kg\" (UniqueName: \"kubernetes.io/projected/f7160d37-518f-49e6-aee8-ce14c3267c54-kube-api-access-kp7kg\") pod \"machine-api-operator-5694c8668f-ffz9s\" (UID: \"f7160d37-518f-49e6-aee8-ce14c3267c54\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.409820 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/bda928e7-be85-4f32-be06-049b116323d9-node-bootstrap-token\") pod \"machine-config-server-6lc74\" (UID: \"bda928e7-be85-4f32-be06-049b116323d9\") " pod="openshift-machine-config-operator/machine-config-server-6lc74" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.409898 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3693c72b-1c6a-4362-bda2-6d5ea365cd38-registry-certificates\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.409958 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-plugins-dir\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.409983 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-csi-data-dir\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410008 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-service-ca\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410029 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5426ed16-fe90-4dc5-9a28-13d3649074e6-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-krsnh\" (UID: \"5426ed16-fe90-4dc5-9a28-13d3649074e6\") " 
pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410055 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/800f445b-95a6-4098-bf5f-ea91a7ead3d0-metrics-certs\") pod \"router-default-5444994796-s45dn\" (UID: \"800f445b-95a6-4098-bf5f-ea91a7ead3d0\") " pod="openshift-ingress/router-default-5444994796-s45dn" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410081 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3d88c45-8fdc-407e-a537-c45a05dedc4b-serving-cert\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410104 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7a0e9e21-4f5b-4e63-b976-1f16d01f357c-proxy-tls\") pod \"machine-config-controller-84d6567774-zjlcj\" (UID: \"7a0e9e21-4f5b-4e63-b976-1f16d01f357c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410115 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/854e3c81-1547-47c9-b15c-2f226239301d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-kgppv\" (UID: \"854e3c81-1547-47c9-b15c-2f226239301d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410128 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/181953a7-e119-4c51-b16f-f1ec4b63da1e-trusted-ca\") pod \"console-operator-58897d9998-zjxxb\" (UID: \"181953a7-e119-4c51-b16f-f1ec4b63da1e\") " pod="openshift-console-operator/console-operator-58897d9998-zjxxb" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410152 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef9faff3-6d7d-40f6-99c6-3771e940c03b-config\") pod \"service-ca-operator-777779d784-qh6cx\" (UID: \"ef9faff3-6d7d-40f6-99c6-3771e940c03b\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410181 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7a0e9e21-4f5b-4e63-b976-1f16d01f357c-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-zjlcj\" (UID: \"7a0e9e21-4f5b-4e63-b976-1f16d01f357c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410202 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c72ed570-ae2c-4d46-92dd-69f41bda14ca-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8mx49\" (UID: \"c72ed570-ae2c-4d46-92dd-69f41bda14ca\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49" Oct 06 21:33:14 crc kubenswrapper[5014]: 
I1006 21:33:14.410225 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a8703611-a93c-4633-8264-4c3ce3eaf77e-etcd-client\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410248 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410278 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/86145702-4563-405c-bc92-cddd39e5e750-config-volume\") pod \"dns-default-97wsg\" (UID: \"86145702-4563-405c-bc92-cddd39e5e750\") " pod="openshift-dns/dns-default-97wsg" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410298 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9xgt\" (UniqueName: \"kubernetes.io/projected/a6ff5a2f-dbc4-4445-b297-e8627bfb2d04-kube-api-access-b9xgt\") pod \"machine-approver-56656f9798-t2756\" (UID: \"a6ff5a2f-dbc4-4445-b297-e8627bfb2d04\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410319 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95m8l\" (UniqueName: \"kubernetes.io/projected/5426ed16-fe90-4dc5-9a28-13d3649074e6-kube-api-access-95m8l\") pod \"openshift-apiserver-operator-796bbdcf4f-krsnh\" (UID: \"5426ed16-fe90-4dc5-9a28-13d3649074e6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410344 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1753f6a6-b8b0-4bea-9d03-a6d8539a25dd-service-ca-bundle\") pod \"authentication-operator-69f744f599-dq58g\" (UID: \"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410369 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5rs5\" (UniqueName: \"kubernetes.io/projected/6d44c7b8-2cb6-4445-afe1-de6732d5c626-kube-api-access-r5rs5\") pod \"ingress-operator-5b745b69d9-9vkf5\" (UID: \"6d44c7b8-2cb6-4445-afe1-de6732d5c626\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410393 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/627924fa-cfa6-404d-91a0-fbf6505ce05b-signing-key\") pod \"service-ca-9c57cc56f-wd97j\" (UID: \"627924fa-cfa6-404d-91a0-fbf6505ce05b\") " pod="openshift-service-ca/service-ca-9c57cc56f-wd97j" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410431 5014 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6424j\" (UniqueName: \"kubernetes.io/projected/dcdf5ea0-5d82-4417-89fa-6c37aee2916d-kube-api-access-6424j\") pod \"kube-storage-version-migrator-operator-b67b599dd-w8lt2\" (UID: \"dcdf5ea0-5d82-4417-89fa-6c37aee2916d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410455 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a899a8b5-e5c4-456e-a469-203bbcf5445e-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-8cghq\" (UID: \"a899a8b5-e5c4-456e-a469-203bbcf5445e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410479 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/65d0615f-167e-462b-b846-cee104cdeec4-proxy-tls\") pod \"machine-config-operator-74547568cd-8pkfv\" (UID: \"65d0615f-167e-462b-b846-cee104cdeec4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410501 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d3d88c45-8fdc-407e-a537-c45a05dedc4b-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410527 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6d352ce1-8de3-49ad-83b7-62f38f1864aa-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-q4xf4\" (UID: \"6d352ce1-8de3-49ad-83b7-62f38f1864aa\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410549 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c72ed570-ae2c-4d46-92dd-69f41bda14ca-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8mx49\" (UID: \"c72ed570-ae2c-4d46-92dd-69f41bda14ca\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410572 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/15ca2d23-bdd0-4951-aeb5-6544fd2e43d5-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-cl7xx\" (UID: \"15ca2d23-bdd0-4951-aeb5-6544fd2e43d5\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410594 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-mountpoint-dir\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" 
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410635 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3693c72b-1c6a-4362-bda2-6d5ea365cd38-installation-pull-secrets\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410657 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9aabe83-7840-47cc-b3d5-72b068737094-config\") pod \"route-controller-manager-6576b87f9c-qnjng\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410680 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c72ed570-ae2c-4d46-92dd-69f41bda14ca-config\") pod \"kube-controller-manager-operator-78b949d7b-8mx49\" (UID: \"c72ed570-ae2c-4d46-92dd-69f41bda14ca\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410703 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a8703611-a93c-4633-8264-4c3ce3eaf77e-image-import-ca\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410726 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6sdbx\" (UniqueName: \"kubernetes.io/projected/9ce11448-b517-411b-8bf1-8494fc43116e-kube-api-access-6sdbx\") pod \"openshift-controller-manager-operator-756b6f6bc6-tjwg2\" (UID: \"9ce11448-b517-411b-8bf1-8494fc43116e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410747 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6d44c7b8-2cb6-4445-afe1-de6732d5c626-bound-sa-token\") pod \"ingress-operator-5b745b69d9-9vkf5\" (UID: \"6d44c7b8-2cb6-4445-afe1-de6732d5c626\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410780 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjjjw\" (UniqueName: \"kubernetes.io/projected/627924fa-cfa6-404d-91a0-fbf6505ce05b-kube-api-access-jjjjw\") pod \"service-ca-9c57cc56f-wd97j\" (UID: \"627924fa-cfa6-404d-91a0-fbf6505ce05b\") " pod="openshift-service-ca/service-ca-9c57cc56f-wd97j"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410811 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7a0e9e21-4f5b-4e63-b976-1f16d01f357c-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-zjlcj\" (UID: \"7a0e9e21-4f5b-4e63-b976-1f16d01f357c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.410993 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3693c72b-1c6a-4362-bda2-6d5ea365cd38-registry-certificates\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.411581 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a8703611-a93c-4633-8264-4c3ce3eaf77e-image-import-ca\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.411839 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d3d88c45-8fdc-407e-a537-c45a05dedc4b-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.411916 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8e91dd83-2270-4ddf-bf09-d6d1b8595453-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-6kdh6\" (UID: \"8e91dd83-2270-4ddf-bf09-d6d1b8595453\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6kdh6"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.411959 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2f6a5a5c-5106-44e0-9579-ce54200179b1-webhook-cert\") pod \"packageserver-d55dfcdfc-99ns4\" (UID: \"2f6a5a5c-5106-44e0-9579-ce54200179b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412003 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412043 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvqlf\" (UniqueName: \"kubernetes.io/projected/be279f3d-aa9c-47fc-8396-8ffb895fc9b6-kube-api-access-cvqlf\") pod \"cluster-samples-operator-665b6dd947-pxwc5\" (UID: \"be279f3d-aa9c-47fc-8396-8ffb895fc9b6\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-pxwc5"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412076 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5426ed16-fe90-4dc5-9a28-13d3649074e6-config\") pod \"openshift-apiserver-operator-796bbdcf4f-krsnh\" (UID: \"5426ed16-fe90-4dc5-9a28-13d3649074e6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412128 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnbg7\" (UniqueName: \"kubernetes.io/projected/26e227a5-f26a-4bfb-9d4b-0f6718a234b7-kube-api-access-cnbg7\") pod \"ingress-canary-f5cd4\" (UID: \"26e227a5-f26a-4bfb-9d4b-0f6718a234b7\") " pod="openshift-ingress-canary/ingress-canary-f5cd4"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412171 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsj5g\" (UniqueName: \"kubernetes.io/projected/b9aabe83-7840-47cc-b3d5-72b068737094-kube-api-access-rsj5g\") pod \"route-controller-manager-6576b87f9c-qnjng\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412207 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/101e93eb-cfad-49df-95ce-b6b12664dd3a-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-77xrv\" (UID: \"101e93eb-cfad-49df-95ce-b6b12664dd3a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-77xrv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412240 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1753f6a6-b8b0-4bea-9d03-a6d8539a25dd-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-dq58g\" (UID: \"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412270 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1753f6a6-b8b0-4bea-9d03-a6d8539a25dd-serving-cert\") pod \"authentication-operator-69f744f599-dq58g\" (UID: \"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412432 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9aabe83-7840-47cc-b3d5-72b068737094-config\") pod \"route-controller-manager-6576b87f9c-qnjng\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412780 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpznr\" (UniqueName: \"kubernetes.io/projected/101e93eb-cfad-49df-95ce-b6b12664dd3a-kube-api-access-gpznr\") pod \"control-plane-machine-set-operator-78cbb6b69f-77xrv\" (UID: \"101e93eb-cfad-49df-95ce-b6b12664dd3a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-77xrv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412808 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/54bd74da-566f-468a-a57f-fbbd0bfd2a50-etcd-ca\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412840 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/86145702-4563-405c-bc92-cddd39e5e750-metrics-tls\") pod \"dns-default-97wsg\" (UID: \"86145702-4563-405c-bc92-cddd39e5e750\") " pod="openshift-dns/dns-default-97wsg"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412873 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54bd74da-566f-468a-a57f-fbbd0bfd2a50-config\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412907 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a8703611-a93c-4633-8264-4c3ce3eaf77e-node-pullsecrets\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412932 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1e659c79-33c6-49d3-a333-5280ece9fa5b-secret-volume\") pod \"collect-profiles-29329770-k9xbb\" (UID: \"1e659c79-33c6-49d3-a333-5280ece9fa5b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412947 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6ff5a2f-dbc4-4445-b297-e8627bfb2d04-config\") pod \"machine-approver-56656f9798-t2756\" (UID: \"a6ff5a2f-dbc4-4445-b297-e8627bfb2d04\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412968 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcdf5ea0-5d82-4417-89fa-6c37aee2916d-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-w8lt2\" (UID: \"dcdf5ea0-5d82-4417-89fa-6c37aee2916d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.412986 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-registry-tls\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.413001 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/800f445b-95a6-4098-bf5f-ea91a7ead3d0-default-certificate\") pod \"router-default-5444994796-s45dn\" (UID: \"800f445b-95a6-4098-bf5f-ea91a7ead3d0\") " pod="openshift-ingress/router-default-5444994796-s45dn"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.413018 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6k4t\" (UniqueName: \"kubernetes.io/projected/800f445b-95a6-4098-bf5f-ea91a7ead3d0-kube-api-access-p6k4t\") pod \"router-default-5444994796-s45dn\" (UID: \"800f445b-95a6-4098-bf5f-ea91a7ead3d0\") " pod="openshift-ingress/router-default-5444994796-s45dn"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.413061 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6txwk\" (UniqueName: \"kubernetes.io/projected/181953a7-e119-4c51-b16f-f1ec4b63da1e-kube-api-access-6txwk\") pod \"console-operator-58897d9998-zjxxb\" (UID: \"181953a7-e119-4c51-b16f-f1ec4b63da1e\") " pod="openshift-console-operator/console-operator-58897d9998-zjxxb"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.413078 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a899a8b5-e5c4-456e-a469-203bbcf5445e-config\") pod \"kube-apiserver-operator-766d6c64bb-8cghq\" (UID: \"a899a8b5-e5c4-456e-a469-203bbcf5445e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.413095 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-bound-sa-token\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.413110 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a8703611-a93c-4633-8264-4c3ce3eaf77e-audit-dir\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.413126 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8mzs\" (UniqueName: \"kubernetes.io/projected/2bb77525-303a-4691-81d2-0bbeb6eeed9c-kube-api-access-h8mzs\") pod \"downloads-7954f5f757-zh4s7\" (UID: \"2bb77525-303a-4691-81d2-0bbeb6eeed9c\") " pod="openshift-console/downloads-7954f5f757-zh4s7"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.413204 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a8703611-a93c-4633-8264-4c3ce3eaf77e-node-pullsecrets\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.413818 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/854e3c81-1547-47c9-b15c-2f226239301d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-kgppv\" (UID: \"854e3c81-1547-47c9-b15c-2f226239301d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.413942 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3693c72b-1c6a-4362-bda2-6d5ea365cd38-installation-pull-secrets\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.414047 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d3d88c45-8fdc-407e-a537-c45a05dedc4b-etcd-client\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.414205 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a8703611-a93c-4633-8264-4c3ce3eaf77e-audit-dir\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.414240 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2qpv\" (UniqueName: \"kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-kube-api-access-q2qpv\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.414273 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hr4qv\" (UniqueName: \"kubernetes.io/projected/d1e1ed07-468a-4627-9690-ab83129e9a93-kube-api-access-hr4qv\") pod \"migrator-59844c95c7-n59hv\" (UID: \"d1e1ed07-468a-4627-9690-ab83129e9a93\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n59hv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.414293 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/a6ff5a2f-dbc4-4445-b297-e8627bfb2d04-machine-approver-tls\") pod \"machine-approver-56656f9798-t2756\" (UID: \"a6ff5a2f-dbc4-4445-b297-e8627bfb2d04\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.414318 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.414344 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/fbda3f6d-582e-4290-8939-e06c2f971f0e-available-featuregates\") pod \"openshift-config-operator-7777fb866f-6dcxq\" (UID: \"fbda3f6d-582e-4290-8939-e06c2f971f0e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.414380 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e659c79-33c6-49d3-a333-5280ece9fa5b-config-volume\") pod \"collect-profiles-29329770-k9xbb\" (UID: \"1e659c79-33c6-49d3-a333-5280ece9fa5b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.414396 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a6ff5a2f-dbc4-4445-b297-e8627bfb2d04-auth-proxy-config\") pod \"machine-approver-56656f9798-t2756\" (UID: \"a6ff5a2f-dbc4-4445-b297-e8627bfb2d04\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.414414 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/54bd74da-566f-468a-a57f-fbbd0bfd2a50-etcd-service-ca\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.414435 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/65d0615f-167e-462b-b846-cee104cdeec4-images\") pod \"machine-config-operator-74547568cd-8pkfv\" (UID: \"65d0615f-167e-462b-b846-cee104cdeec4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.414475 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a8703611-a93c-4633-8264-4c3ce3eaf77e-encryption-config\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.414491 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/2f6a5a5c-5106-44e0-9579-ce54200179b1-tmpfs\") pod \"packageserver-d55dfcdfc-99ns4\" (UID: \"2f6a5a5c-5106-44e0-9579-ce54200179b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.414534 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43df857e-f7f9-45e8-97e7-21adc3167678-console-oauth-config\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415048 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/65d0615f-167e-462b-b846-cee104cdeec4-images\") pod \"machine-config-operator-74547568cd-8pkfv\" (UID: \"65d0615f-167e-462b-b846-cee104cdeec4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415255 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ce11448-b517-411b-8bf1-8494fc43116e-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-tjwg2\" (UID: \"9ce11448-b517-411b-8bf1-8494fc43116e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415292 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9aabe83-7840-47cc-b3d5-72b068737094-serving-cert\") pod \"route-controller-manager-6576b87f9c-qnjng\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415312 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/627924fa-cfa6-404d-91a0-fbf6505ce05b-signing-cabundle\") pod \"service-ca-9c57cc56f-wd97j\" (UID: \"627924fa-cfa6-404d-91a0-fbf6505ce05b\") " pod="openshift-service-ca/service-ca-9c57cc56f-wd97j"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415335 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415361 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415388 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a8703611-a93c-4633-8264-4c3ce3eaf77e-serving-cert\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415730 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzhrl\" (UniqueName: \"kubernetes.io/projected/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-kube-api-access-mzhrl\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415762 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3693c72b-1c6a-4362-bda2-6d5ea365cd38-trusted-ca\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415781 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/800f445b-95a6-4098-bf5f-ea91a7ead3d0-stats-auth\") pod \"router-default-5444994796-s45dn\" (UID: \"800f445b-95a6-4098-bf5f-ea91a7ead3d0\") " pod="openshift-ingress/router-default-5444994796-s45dn"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415798 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5p2l9\" (UniqueName: \"kubernetes.io/projected/8e91dd83-2270-4ddf-bf09-d6d1b8595453-kube-api-access-5p2l9\") pod \"multus-admission-controller-857f4d67dd-6kdh6\" (UID: \"8e91dd83-2270-4ddf-bf09-d6d1b8595453\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6kdh6"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415814 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-console-config\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415835 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vd5t\" (UniqueName: \"kubernetes.io/projected/1753f6a6-b8b0-4bea-9d03-a6d8539a25dd-kube-api-access-6vd5t\") pod \"authentication-operator-69f744f599-dq58g\" (UID: \"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415851 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcdf5ea0-5d82-4417-89fa-6c37aee2916d-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-w8lt2\" (UID: \"dcdf5ea0-5d82-4417-89fa-6c37aee2916d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415866 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a899a8b5-e5c4-456e-a469-203bbcf5445e-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-8cghq\" (UID: \"a899a8b5-e5c4-456e-a469-203bbcf5445e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415883 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d352ce1-8de3-49ad-83b7-62f38f1864aa-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-q4xf4\" (UID: \"6d352ce1-8de3-49ad-83b7-62f38f1864aa\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415906 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/26e227a5-f26a-4bfb-9d4b-0f6718a234b7-cert\") pod \"ingress-canary-f5cd4\" (UID: \"26e227a5-f26a-4bfb-9d4b-0f6718a234b7\") " pod="openshift-ingress-canary/ingress-canary-f5cd4"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415927 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w75jv\" (UniqueName: \"kubernetes.io/projected/fbda3f6d-582e-4290-8939-e06c2f971f0e-kube-api-access-w75jv\") pod \"openshift-config-operator-7777fb866f-6dcxq\" (UID: \"fbda3f6d-582e-4290-8939-e06c2f971f0e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415944 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6d44c7b8-2cb6-4445-afe1-de6732d5c626-metrics-tls\") pod \"ingress-operator-5b745b69d9-9vkf5\" (UID: \"6d44c7b8-2cb6-4445-afe1-de6732d5c626\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415962 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/181953a7-e119-4c51-b16f-f1ec4b63da1e-config\") pod \"console-operator-58897d9998-zjxxb\" (UID: \"181953a7-e119-4c51-b16f-f1ec4b63da1e\") " pod="openshift-console-operator/console-operator-58897d9998-zjxxb"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415977 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/01a961cf-0904-406d-862a-027b53178111-profile-collector-cert\") pod \"catalog-operator-68c6474976-8mlsl\" (UID: \"01a961cf-0904-406d-862a-027b53178111\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415994 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2jtw\" (UniqueName: \"kubernetes.io/projected/ef9faff3-6d7d-40f6-99c6-3771e940c03b-kube-api-access-w2jtw\") pod \"service-ca-operator-777779d784-qh6cx\" (UID: \"ef9faff3-6d7d-40f6-99c6-3771e940c03b\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416014 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43df857e-f7f9-45e8-97e7-21adc3167678-console-serving-cert\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416027 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/54bd74da-566f-468a-a57f-fbbd0bfd2a50-etcd-client\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416043 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrcn2\" (UniqueName: \"kubernetes.io/projected/7a0e9e21-4f5b-4e63-b976-1f16d01f357c-kube-api-access-qrcn2\") pod \"machine-config-controller-84d6567774-zjlcj\" (UID: \"7a0e9e21-4f5b-4e63-b976-1f16d01f357c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416065 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7hcb\" (UniqueName: \"kubernetes.io/projected/65d0615f-167e-462b-b846-cee104cdeec4-kube-api-access-p7hcb\") pod \"machine-config-operator-74547568cd-8pkfv\" (UID: \"65d0615f-167e-462b-b846-cee104cdeec4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv"
Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416081 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/181953a7-e119-4c51-b16f-f1ec4b63da1e-serving-cert\") pod \"console-operator-58897d9998-zjxxb\" (UID: \"181953a7-e119-4c51-b16f-f1ec4b63da1e\") "
pod="openshift-console-operator/console-operator-58897d9998-zjxxb" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416096 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dc45v\" (UniqueName: \"kubernetes.io/projected/bda928e7-be85-4f32-be06-049b116323d9-kube-api-access-dc45v\") pod \"machine-config-server-6lc74\" (UID: \"bda928e7-be85-4f32-be06-049b116323d9\") " pod="openshift-machine-config-operator/machine-config-server-6lc74" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416110 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a8703611-a93c-4633-8264-4c3ce3eaf77e-etcd-serving-ca\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416126 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjv2r\" (UniqueName: \"kubernetes.io/projected/86145702-4563-405c-bc92-cddd39e5e750-kube-api-access-bjv2r\") pod \"dns-default-97wsg\" (UID: \"86145702-4563-405c-bc92-cddd39e5e750\") " pod="openshift-dns/dns-default-97wsg" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416144 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/800f445b-95a6-4098-bf5f-ea91a7ead3d0-service-ca-bundle\") pod \"router-default-5444994796-s45dn\" (UID: \"800f445b-95a6-4098-bf5f-ea91a7ead3d0\") " pod="openshift-ingress/router-default-5444994796-s45dn" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416163 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a8703611-a93c-4633-8264-4c3ce3eaf77e-audit\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416177 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a8703611-a93c-4633-8264-4c3ce3eaf77e-trusted-ca-bundle\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416201 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/be279f3d-aa9c-47fc-8396-8ffb895fc9b6-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-pxwc5\" (UID: \"be279f3d-aa9c-47fc-8396-8ffb895fc9b6\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-pxwc5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416219 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6d44c7b8-2cb6-4445-afe1-de6732d5c626-trusted-ca\") pod \"ingress-operator-5b745b69d9-9vkf5\" (UID: \"6d44c7b8-2cb6-4445-afe1-de6732d5c626\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416236 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/1753f6a6-b8b0-4bea-9d03-a6d8539a25dd-config\") pod \"authentication-operator-69f744f599-dq58g\" (UID: \"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416279 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sczfk\" (UniqueName: \"kubernetes.io/projected/537a285f-ff1e-4ae8-954d-0ff696453b85-kube-api-access-sczfk\") pod \"olm-operator-6b444d44fb-gd58z\" (UID: \"537a285f-ff1e-4ae8-954d-0ff696453b85\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415664 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416921 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/800f445b-95a6-4098-bf5f-ea91a7ead3d0-metrics-certs\") pod \"router-default-5444994796-s45dn\" (UID: \"800f445b-95a6-4098-bf5f-ea91a7ead3d0\") " pod="openshift-ingress/router-default-5444994796-s45dn" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416327 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6a8033e0-8d2f-4f40-bab2-b2670cefe0b3-metrics-tls\") pod \"dns-operator-744455d44c-wqnvh\" (UID: \"6a8033e0-8d2f-4f40-bab2-b2670cefe0b3\") " pod="openshift-dns-operator/dns-operator-744455d44c-wqnvh" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.417005 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-trusted-ca-bundle\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.416926 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/fbda3f6d-582e-4290-8939-e06c2f971f0e-available-featuregates\") pod \"openshift-config-operator-7777fb866f-6dcxq\" (UID: \"fbda3f6d-582e-4290-8939-e06c2f971f0e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.417363 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/627924fa-cfa6-404d-91a0-fbf6505ce05b-signing-cabundle\") pod \"service-ca-9c57cc56f-wd97j\" (UID: \"627924fa-cfa6-404d-91a0-fbf6505ce05b\") " pod="openshift-service-ca/service-ca-9c57cc56f-wd97j" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.415683 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7a0e9e21-4f5b-4e63-b976-1f16d01f357c-proxy-tls\") pod \"machine-config-controller-84d6567774-zjlcj\" (UID: \"7a0e9e21-4f5b-4e63-b976-1f16d01f357c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.417841 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/800f445b-95a6-4098-bf5f-ea91a7ead3d0-service-ca-bundle\") pod \"router-default-5444994796-s45dn\" (UID: \"800f445b-95a6-4098-bf5f-ea91a7ead3d0\") " pod="openshift-ingress/router-default-5444994796-s45dn" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.417891 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a8703611-a93c-4633-8264-4c3ce3eaf77e-encryption-config\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.417978 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a8703611-a93c-4633-8264-4c3ce3eaf77e-audit\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418188 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/537a285f-ff1e-4ae8-954d-0ff696453b85-profile-collector-cert\") pod \"olm-operator-6b444d44fb-gd58z\" (UID: \"537a285f-ff1e-4ae8-954d-0ff696453b85\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418235 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xr2lm\" (UniqueName: \"kubernetes.io/projected/54bd74da-566f-468a-a57f-fbbd0bfd2a50-kube-api-access-xr2lm\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418266 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-audit-policies\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418300 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/f7160d37-518f-49e6-aee8-ce14c3267c54-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-ffz9s\" (UID: \"f7160d37-518f-49e6-aee8-ce14c3267c54\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418343 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5474w\" (UniqueName: \"kubernetes.io/projected/d3d88c45-8fdc-407e-a537-c45a05dedc4b-kube-api-access-5474w\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418379 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6wrq\" (UniqueName: \"kubernetes.io/projected/1e659c79-33c6-49d3-a333-5280ece9fa5b-kube-api-access-k6wrq\") pod \"collect-profiles-29329770-k9xbb\" (UID: \"1e659c79-33c6-49d3-a333-5280ece9fa5b\") 
" pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418415 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fbda3f6d-582e-4290-8939-e06c2f971f0e-serving-cert\") pod \"openshift-config-operator-7777fb866f-6dcxq\" (UID: \"fbda3f6d-582e-4290-8939-e06c2f971f0e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418455 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d3d88c45-8fdc-407e-a537-c45a05dedc4b-audit-dir\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418463 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3693c72b-1c6a-4362-bda2-6d5ea365cd38-trusted-ca\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418419 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a8703611-a93c-4633-8264-4c3ce3eaf77e-etcd-client\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418486 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-registry-tls\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418486 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/54bd74da-566f-468a-a57f-fbbd0bfd2a50-serving-cert\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418537 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418544 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d3d88c45-8fdc-407e-a537-c45a05dedc4b-audit-dir\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418559 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418596 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418607 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/800f445b-95a6-4098-bf5f-ea91a7ead3d0-default-certificate\") pod \"router-default-5444994796-s45dn\" (UID: \"800f445b-95a6-4098-bf5f-ea91a7ead3d0\") " pod="openshift-ingress/router-default-5444994796-s45dn" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418723 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7160d37-518f-49e6-aee8-ce14c3267c54-config\") pod \"machine-api-operator-5694c8668f-ffz9s\" (UID: \"f7160d37-518f-49e6-aee8-ce14c3267c54\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418750 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6xrf\" (UniqueName: \"kubernetes.io/projected/a8703611-a93c-4633-8264-4c3ce3eaf77e-kube-api-access-f6xrf\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418769 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-registration-dir\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: E1006 21:33:14.418812 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:14.91880277 +0000 UTC m=+140.211839504 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.418833 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d3d88c45-8fdc-407e-a537-c45a05dedc4b-audit-policies\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419142 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9aabe83-7840-47cc-b3d5-72b068737094-serving-cert\") pod \"route-controller-manager-6576b87f9c-qnjng\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419159 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7nsg\" (UniqueName: \"kubernetes.io/projected/43df857e-f7f9-45e8-97e7-21adc3167678-kube-api-access-m7nsg\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419203 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/854e3c81-1547-47c9-b15c-2f226239301d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-kgppv\" (UID: \"854e3c81-1547-47c9-b15c-2f226239301d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419223 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6rq5\" (UniqueName: \"kubernetes.io/projected/15ca2d23-bdd0-4951-aeb5-6544fd2e43d5-kube-api-access-w6rq5\") pod \"package-server-manager-789f6589d5-cl7xx\" (UID: \"15ca2d23-bdd0-4951-aeb5-6544fd2e43d5\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419258 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/65d0615f-167e-462b-b846-cee104cdeec4-auth-proxy-config\") pod \"machine-config-operator-74547568cd-8pkfv\" (UID: \"65d0615f-167e-462b-b846-cee104cdeec4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419278 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/01a961cf-0904-406d-862a-027b53178111-srv-cert\") pod \"catalog-operator-68c6474976-8mlsl\" (UID: \"01a961cf-0904-406d-862a-027b53178111\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419294 5014 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/720c691e-a28e-4b39-9571-86e321399306-audit-dir\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419313 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419374 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d352ce1-8de3-49ad-83b7-62f38f1864aa-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-q4xf4\" (UID: \"6d352ce1-8de3-49ad-83b7-62f38f1864aa\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419415 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d3d88c45-8fdc-407e-a537-c45a05dedc4b-encryption-config\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419431 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ef9faff3-6d7d-40f6-99c6-3771e940c03b-serving-cert\") pod \"service-ca-operator-777779d784-qh6cx\" (UID: \"ef9faff3-6d7d-40f6-99c6-3771e940c03b\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419445 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-socket-dir\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419463 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-oauth-serving-cert\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419494 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmx4v\" (UniqueName: \"kubernetes.io/projected/6a8033e0-8d2f-4f40-bab2-b2670cefe0b3-kube-api-access-xmx4v\") pod \"dns-operator-744455d44c-wqnvh\" (UID: \"6a8033e0-8d2f-4f40-bab2-b2670cefe0b3\") " pod="openshift-dns-operator/dns-operator-744455d44c-wqnvh" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419511 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zw594\" 
(UniqueName: \"kubernetes.io/projected/720c691e-a28e-4b39-9571-86e321399306-kube-api-access-zw594\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419529 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b9aabe83-7840-47cc-b3d5-72b068737094-client-ca\") pod \"route-controller-manager-6576b87f9c-qnjng\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419546 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9f5n\" (UniqueName: \"kubernetes.io/projected/854e3c81-1547-47c9-b15c-2f226239301d-kube-api-access-w9f5n\") pod \"cluster-image-registry-operator-dc59b4c8b-kgppv\" (UID: \"854e3c81-1547-47c9-b15c-2f226239301d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419575 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwwbj\" (UniqueName: \"kubernetes.io/projected/01a961cf-0904-406d-862a-027b53178111-kube-api-access-vwwbj\") pod \"catalog-operator-68c6474976-8mlsl\" (UID: \"01a961cf-0904-406d-862a-027b53178111\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419591 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gc5pq\" (UniqueName: \"kubernetes.io/projected/2f6a5a5c-5106-44e0-9579-ce54200179b1-kube-api-access-gc5pq\") pod \"packageserver-d55dfcdfc-99ns4\" (UID: \"2f6a5a5c-5106-44e0-9579-ce54200179b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.419753 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/65d0615f-167e-462b-b846-cee104cdeec4-auth-proxy-config\") pod \"machine-config-operator-74547568cd-8pkfv\" (UID: \"65d0615f-167e-462b-b846-cee104cdeec4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.420280 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d3d88c45-8fdc-407e-a537-c45a05dedc4b-audit-policies\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.420337 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a8703611-a93c-4633-8264-4c3ce3eaf77e-etcd-serving-ca\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.421313 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/800f445b-95a6-4098-bf5f-ea91a7ead3d0-stats-auth\") pod \"router-default-5444994796-s45dn\" (UID: 
\"800f445b-95a6-4098-bf5f-ea91a7ead3d0\") " pod="openshift-ingress/router-default-5444994796-s45dn" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.421349 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/627924fa-cfa6-404d-91a0-fbf6505ce05b-signing-key\") pod \"service-ca-9c57cc56f-wd97j\" (UID: \"627924fa-cfa6-404d-91a0-fbf6505ce05b\") " pod="openshift-service-ca/service-ca-9c57cc56f-wd97j" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.421396 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b9aabe83-7840-47cc-b3d5-72b068737094-client-ca\") pod \"route-controller-manager-6576b87f9c-qnjng\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.421710 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/65d0615f-167e-462b-b846-cee104cdeec4-proxy-tls\") pod \"machine-config-operator-74547568cd-8pkfv\" (UID: \"65d0615f-167e-462b-b846-cee104cdeec4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.421817 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fbda3f6d-582e-4290-8939-e06c2f971f0e-serving-cert\") pod \"openshift-config-operator-7777fb866f-6dcxq\" (UID: \"fbda3f6d-582e-4290-8939-e06c2f971f0e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.421924 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a8703611-a93c-4633-8264-4c3ce3eaf77e-trusted-ca-bundle\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.423261 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d3d88c45-8fdc-407e-a537-c45a05dedc4b-serving-cert\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.424975 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d3d88c45-8fdc-407e-a537-c45a05dedc4b-encryption-config\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.425376 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a8703611-a93c-4633-8264-4c3ce3eaf77e-serving-cert\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.425837 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/15ca2d23-bdd0-4951-aeb5-6544fd2e43d5-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-cl7xx\" (UID: \"15ca2d23-bdd0-4951-aeb5-6544fd2e43d5\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.443003 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.453312 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.473500 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.492782 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.513940 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.520676 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:14 crc kubenswrapper[5014]: E1006 21:33:14.520899 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.02086718 +0000 UTC m=+140.313903954 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521055 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vd5t\" (UniqueName: \"kubernetes.io/projected/1753f6a6-b8b0-4bea-9d03-a6d8539a25dd-kube-api-access-6vd5t\") pod \"authentication-operator-69f744f599-dq58g\" (UID: \"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521104 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcdf5ea0-5d82-4417-89fa-6c37aee2916d-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-w8lt2\" (UID: \"dcdf5ea0-5d82-4417-89fa-6c37aee2916d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521139 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d352ce1-8de3-49ad-83b7-62f38f1864aa-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-q4xf4\" (UID: \"6d352ce1-8de3-49ad-83b7-62f38f1864aa\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521173 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/26e227a5-f26a-4bfb-9d4b-0f6718a234b7-cert\") pod \"ingress-canary-f5cd4\" (UID: \"26e227a5-f26a-4bfb-9d4b-0f6718a234b7\") " pod="openshift-ingress-canary/ingress-canary-f5cd4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521205 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a899a8b5-e5c4-456e-a469-203bbcf5445e-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-8cghq\" (UID: \"a899a8b5-e5c4-456e-a469-203bbcf5445e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521253 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6d44c7b8-2cb6-4445-afe1-de6732d5c626-metrics-tls\") pod \"ingress-operator-5b745b69d9-9vkf5\" (UID: \"6d44c7b8-2cb6-4445-afe1-de6732d5c626\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521293 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/181953a7-e119-4c51-b16f-f1ec4b63da1e-config\") pod \"console-operator-58897d9998-zjxxb\" (UID: \"181953a7-e119-4c51-b16f-f1ec4b63da1e\") " pod="openshift-console-operator/console-operator-58897d9998-zjxxb" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521329 5014 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/01a961cf-0904-406d-862a-027b53178111-profile-collector-cert\") pod \"catalog-operator-68c6474976-8mlsl\" (UID: \"01a961cf-0904-406d-862a-027b53178111\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521360 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2jtw\" (UniqueName: \"kubernetes.io/projected/ef9faff3-6d7d-40f6-99c6-3771e940c03b-kube-api-access-w2jtw\") pod \"service-ca-operator-777779d784-qh6cx\" (UID: \"ef9faff3-6d7d-40f6-99c6-3771e940c03b\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521399 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43df857e-f7f9-45e8-97e7-21adc3167678-console-serving-cert\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521433 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/54bd74da-566f-468a-a57f-fbbd0bfd2a50-etcd-client\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521487 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/181953a7-e119-4c51-b16f-f1ec4b63da1e-serving-cert\") pod \"console-operator-58897d9998-zjxxb\" (UID: \"181953a7-e119-4c51-b16f-f1ec4b63da1e\") " pod="openshift-console-operator/console-operator-58897d9998-zjxxb" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521526 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dc45v\" (UniqueName: \"kubernetes.io/projected/bda928e7-be85-4f32-be06-049b116323d9-kube-api-access-dc45v\") pod \"machine-config-server-6lc74\" (UID: \"bda928e7-be85-4f32-be06-049b116323d9\") " pod="openshift-machine-config-operator/machine-config-server-6lc74" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521578 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjv2r\" (UniqueName: \"kubernetes.io/projected/86145702-4563-405c-bc92-cddd39e5e750-kube-api-access-bjv2r\") pod \"dns-default-97wsg\" (UID: \"86145702-4563-405c-bc92-cddd39e5e750\") " pod="openshift-dns/dns-default-97wsg" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521638 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1753f6a6-b8b0-4bea-9d03-a6d8539a25dd-config\") pod \"authentication-operator-69f744f599-dq58g\" (UID: \"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521699 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/be279f3d-aa9c-47fc-8396-8ffb895fc9b6-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-pxwc5\" (UID: \"be279f3d-aa9c-47fc-8396-8ffb895fc9b6\") " 
pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-pxwc5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521739 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6d44c7b8-2cb6-4445-afe1-de6732d5c626-trusted-ca\") pod \"ingress-operator-5b745b69d9-9vkf5\" (UID: \"6d44c7b8-2cb6-4445-afe1-de6732d5c626\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521772 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sczfk\" (UniqueName: \"kubernetes.io/projected/537a285f-ff1e-4ae8-954d-0ff696453b85-kube-api-access-sczfk\") pod \"olm-operator-6b444d44fb-gd58z\" (UID: \"537a285f-ff1e-4ae8-954d-0ff696453b85\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521806 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6a8033e0-8d2f-4f40-bab2-b2670cefe0b3-metrics-tls\") pod \"dns-operator-744455d44c-wqnvh\" (UID: \"6a8033e0-8d2f-4f40-bab2-b2670cefe0b3\") " pod="openshift-dns-operator/dns-operator-744455d44c-wqnvh" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521942 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-trusted-ca-bundle\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.521996 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/f7160d37-518f-49e6-aee8-ce14c3267c54-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-ffz9s\" (UID: \"f7160d37-518f-49e6-aee8-ce14c3267c54\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.522041 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6wrq\" (UniqueName: \"kubernetes.io/projected/1e659c79-33c6-49d3-a333-5280ece9fa5b-kube-api-access-k6wrq\") pod \"collect-profiles-29329770-k9xbb\" (UID: \"1e659c79-33c6-49d3-a333-5280ece9fa5b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.522090 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/537a285f-ff1e-4ae8-954d-0ff696453b85-profile-collector-cert\") pod \"olm-operator-6b444d44fb-gd58z\" (UID: \"537a285f-ff1e-4ae8-954d-0ff696453b85\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.522136 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xr2lm\" (UniqueName: \"kubernetes.io/projected/54bd74da-566f-468a-a57f-fbbd0bfd2a50-kube-api-access-xr2lm\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.522176 5014 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-audit-policies\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.522209 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/54bd74da-566f-468a-a57f-fbbd0bfd2a50-serving-cert\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.522252 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.522286 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.522356 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1753f6a6-b8b0-4bea-9d03-a6d8539a25dd-config\") pod \"authentication-operator-69f744f599-dq58g\" (UID: \"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.522527 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.522654 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7160d37-518f-49e6-aee8-ce14c3267c54-config\") pod \"machine-api-operator-5694c8668f-ffz9s\" (UID: \"f7160d37-518f-49e6-aee8-ce14c3267c54\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:14 crc kubenswrapper[5014]: E1006 21:33:14.522752 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.022731933 +0000 UTC m=+140.315768707 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.522812 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-registration-dir\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.522858 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7nsg\" (UniqueName: \"kubernetes.io/projected/43df857e-f7f9-45e8-97e7-21adc3167678-kube-api-access-m7nsg\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.522914 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/01a961cf-0904-406d-862a-027b53178111-srv-cert\") pod \"catalog-operator-68c6474976-8mlsl\" (UID: \"01a961cf-0904-406d-862a-027b53178111\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.522957 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/720c691e-a28e-4b39-9571-86e321399306-audit-dir\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.522991 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523025 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d352ce1-8de3-49ad-83b7-62f38f1864aa-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-q4xf4\" (UID: \"6d352ce1-8de3-49ad-83b7-62f38f1864aa\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523073 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ef9faff3-6d7d-40f6-99c6-3771e940c03b-serving-cert\") pod \"service-ca-operator-777779d784-qh6cx\" (UID: \"ef9faff3-6d7d-40f6-99c6-3771e940c03b\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523094 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: 
\"kubernetes.io/host-path/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-registration-dir\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523112 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-socket-dir\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523150 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-oauth-serving-cert\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523156 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/720c691e-a28e-4b39-9571-86e321399306-audit-dir\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523184 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmx4v\" (UniqueName: \"kubernetes.io/projected/6a8033e0-8d2f-4f40-bab2-b2670cefe0b3-kube-api-access-xmx4v\") pod \"dns-operator-744455d44c-wqnvh\" (UID: \"6a8033e0-8d2f-4f40-bab2-b2670cefe0b3\") " pod="openshift-dns-operator/dns-operator-744455d44c-wqnvh" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523251 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zw594\" (UniqueName: \"kubernetes.io/projected/720c691e-a28e-4b39-9571-86e321399306-kube-api-access-zw594\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523317 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwwbj\" (UniqueName: \"kubernetes.io/projected/01a961cf-0904-406d-862a-027b53178111-kube-api-access-vwwbj\") pod \"catalog-operator-68c6474976-8mlsl\" (UID: \"01a961cf-0904-406d-862a-027b53178111\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523359 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-socket-dir\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523362 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gc5pq\" (UniqueName: \"kubernetes.io/projected/2f6a5a5c-5106-44e0-9579-ce54200179b1-kube-api-access-gc5pq\") pod \"packageserver-d55dfcdfc-99ns4\" (UID: \"2f6a5a5c-5106-44e0-9579-ce54200179b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523413 5014 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/bda928e7-be85-4f32-be06-049b116323d9-certs\") pod \"machine-config-server-6lc74\" (UID: \"bda928e7-be85-4f32-be06-049b116323d9\") " pod="openshift-machine-config-operator/machine-config-server-6lc74" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523438 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523462 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f7160d37-518f-49e6-aee8-ce14c3267c54-images\") pod \"machine-api-operator-5694c8668f-ffz9s\" (UID: \"f7160d37-518f-49e6-aee8-ce14c3267c54\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523489 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/537a285f-ff1e-4ae8-954d-0ff696453b85-srv-cert\") pod \"olm-operator-6b444d44fb-gd58z\" (UID: \"537a285f-ff1e-4ae8-954d-0ff696453b85\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523511 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523536 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523561 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ce11448-b517-411b-8bf1-8494fc43116e-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-tjwg2\" (UID: \"9ce11448-b517-411b-8bf1-8494fc43116e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523583 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2f6a5a5c-5106-44e0-9579-ce54200179b1-apiservice-cert\") pod \"packageserver-d55dfcdfc-99ns4\" (UID: \"2f6a5a5c-5106-44e0-9579-ce54200179b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523607 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp7kg\" (UniqueName: 
\"kubernetes.io/projected/f7160d37-518f-49e6-aee8-ce14c3267c54-kube-api-access-kp7kg\") pod \"machine-api-operator-5694c8668f-ffz9s\" (UID: \"f7160d37-518f-49e6-aee8-ce14c3267c54\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523693 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-plugins-dir\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523715 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-csi-data-dir\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523736 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/bda928e7-be85-4f32-be06-049b116323d9-node-bootstrap-token\") pod \"machine-config-server-6lc74\" (UID: \"bda928e7-be85-4f32-be06-049b116323d9\") " pod="openshift-machine-config-operator/machine-config-server-6lc74" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523762 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/181953a7-e119-4c51-b16f-f1ec4b63da1e-trusted-ca\") pod \"console-operator-58897d9998-zjxxb\" (UID: \"181953a7-e119-4c51-b16f-f1ec4b63da1e\") " pod="openshift-console-operator/console-operator-58897d9998-zjxxb" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523783 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-service-ca\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523805 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5426ed16-fe90-4dc5-9a28-13d3649074e6-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-krsnh\" (UID: \"5426ed16-fe90-4dc5-9a28-13d3649074e6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523830 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c72ed570-ae2c-4d46-92dd-69f41bda14ca-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8mx49\" (UID: \"c72ed570-ae2c-4d46-92dd-69f41bda14ca\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523852 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef9faff3-6d7d-40f6-99c6-3771e940c03b-config\") pod \"service-ca-operator-777779d784-qh6cx\" (UID: \"ef9faff3-6d7d-40f6-99c6-3771e940c03b\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx" Oct 06 21:33:14 crc 
kubenswrapper[5014]: I1006 21:33:14.523875 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523896 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/86145702-4563-405c-bc92-cddd39e5e750-config-volume\") pod \"dns-default-97wsg\" (UID: \"86145702-4563-405c-bc92-cddd39e5e750\") " pod="openshift-dns/dns-default-97wsg" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523922 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9xgt\" (UniqueName: \"kubernetes.io/projected/a6ff5a2f-dbc4-4445-b297-e8627bfb2d04-kube-api-access-b9xgt\") pod \"machine-approver-56656f9798-t2756\" (UID: \"a6ff5a2f-dbc4-4445-b297-e8627bfb2d04\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523919 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d352ce1-8de3-49ad-83b7-62f38f1864aa-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-q4xf4\" (UID: \"6d352ce1-8de3-49ad-83b7-62f38f1864aa\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523944 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95m8l\" (UniqueName: \"kubernetes.io/projected/5426ed16-fe90-4dc5-9a28-13d3649074e6-kube-api-access-95m8l\") pod \"openshift-apiserver-operator-796bbdcf4f-krsnh\" (UID: \"5426ed16-fe90-4dc5-9a28-13d3649074e6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523969 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1753f6a6-b8b0-4bea-9d03-a6d8539a25dd-service-ca-bundle\") pod \"authentication-operator-69f744f599-dq58g\" (UID: \"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.523995 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5rs5\" (UniqueName: \"kubernetes.io/projected/6d44c7b8-2cb6-4445-afe1-de6732d5c626-kube-api-access-r5rs5\") pod \"ingress-operator-5b745b69d9-9vkf5\" (UID: \"6d44c7b8-2cb6-4445-afe1-de6732d5c626\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524007 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-trusted-ca-bundle\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524033 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/6d352ce1-8de3-49ad-83b7-62f38f1864aa-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-q4xf4\" (UID: \"6d352ce1-8de3-49ad-83b7-62f38f1864aa\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524055 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c72ed570-ae2c-4d46-92dd-69f41bda14ca-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8mx49\" (UID: \"c72ed570-ae2c-4d46-92dd-69f41bda14ca\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524079 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6424j\" (UniqueName: \"kubernetes.io/projected/dcdf5ea0-5d82-4417-89fa-6c37aee2916d-kube-api-access-6424j\") pod \"kube-storage-version-migrator-operator-b67b599dd-w8lt2\" (UID: \"dcdf5ea0-5d82-4417-89fa-6c37aee2916d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524103 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a899a8b5-e5c4-456e-a469-203bbcf5445e-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-8cghq\" (UID: \"a899a8b5-e5c4-456e-a469-203bbcf5445e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524126 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-mountpoint-dir\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524150 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c72ed570-ae2c-4d46-92dd-69f41bda14ca-config\") pod \"kube-controller-manager-operator-78b949d7b-8mx49\" (UID: \"c72ed570-ae2c-4d46-92dd-69f41bda14ca\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524174 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6sdbx\" (UniqueName: \"kubernetes.io/projected/9ce11448-b517-411b-8bf1-8494fc43116e-kube-api-access-6sdbx\") pod \"openshift-controller-manager-operator-756b6f6bc6-tjwg2\" (UID: \"9ce11448-b517-411b-8bf1-8494fc43116e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524213 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8e91dd83-2270-4ddf-bf09-d6d1b8595453-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-6kdh6\" (UID: \"8e91dd83-2270-4ddf-bf09-d6d1b8595453\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6kdh6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524239 5014 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6d44c7b8-2cb6-4445-afe1-de6732d5c626-bound-sa-token\") pod \"ingress-operator-5b745b69d9-9vkf5\" (UID: \"6d44c7b8-2cb6-4445-afe1-de6732d5c626\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524262 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2f6a5a5c-5106-44e0-9579-ce54200179b1-webhook-cert\") pod \"packageserver-d55dfcdfc-99ns4\" (UID: \"2f6a5a5c-5106-44e0-9579-ce54200179b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524288 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvqlf\" (UniqueName: \"kubernetes.io/projected/be279f3d-aa9c-47fc-8396-8ffb895fc9b6-kube-api-access-cvqlf\") pod \"cluster-samples-operator-665b6dd947-pxwc5\" (UID: \"be279f3d-aa9c-47fc-8396-8ffb895fc9b6\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-pxwc5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524309 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5426ed16-fe90-4dc5-9a28-13d3649074e6-config\") pod \"openshift-apiserver-operator-796bbdcf4f-krsnh\" (UID: \"5426ed16-fe90-4dc5-9a28-13d3649074e6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524331 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524354 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnbg7\" (UniqueName: \"kubernetes.io/projected/26e227a5-f26a-4bfb-9d4b-0f6718a234b7-kube-api-access-cnbg7\") pod \"ingress-canary-f5cd4\" (UID: \"26e227a5-f26a-4bfb-9d4b-0f6718a234b7\") " pod="openshift-ingress-canary/ingress-canary-f5cd4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524383 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/101e93eb-cfad-49df-95ce-b6b12664dd3a-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-77xrv\" (UID: \"101e93eb-cfad-49df-95ce-b6b12664dd3a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-77xrv" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524406 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1753f6a6-b8b0-4bea-9d03-a6d8539a25dd-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-dq58g\" (UID: \"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524429 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/1753f6a6-b8b0-4bea-9d03-a6d8539a25dd-serving-cert\") pod \"authentication-operator-69f744f599-dq58g\" (UID: \"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524452 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpznr\" (UniqueName: \"kubernetes.io/projected/101e93eb-cfad-49df-95ce-b6b12664dd3a-kube-api-access-gpznr\") pod \"control-plane-machine-set-operator-78cbb6b69f-77xrv\" (UID: \"101e93eb-cfad-49df-95ce-b6b12664dd3a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-77xrv" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524474 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/54bd74da-566f-468a-a57f-fbbd0bfd2a50-etcd-ca\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524496 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a899a8b5-e5c4-456e-a469-203bbcf5445e-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-8cghq\" (UID: \"a899a8b5-e5c4-456e-a469-203bbcf5445e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524506 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/86145702-4563-405c-bc92-cddd39e5e750-metrics-tls\") pod \"dns-default-97wsg\" (UID: \"86145702-4563-405c-bc92-cddd39e5e750\") " pod="openshift-dns/dns-default-97wsg" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524528 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54bd74da-566f-468a-a57f-fbbd0bfd2a50-config\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524560 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6txwk\" (UniqueName: \"kubernetes.io/projected/181953a7-e119-4c51-b16f-f1ec4b63da1e-kube-api-access-6txwk\") pod \"console-operator-58897d9998-zjxxb\" (UID: \"181953a7-e119-4c51-b16f-f1ec4b63da1e\") " pod="openshift-console-operator/console-operator-58897d9998-zjxxb" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524582 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1e659c79-33c6-49d3-a333-5280ece9fa5b-secret-volume\") pod \"collect-profiles-29329770-k9xbb\" (UID: \"1e659c79-33c6-49d3-a333-5280ece9fa5b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524608 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6ff5a2f-dbc4-4445-b297-e8627bfb2d04-config\") pod \"machine-approver-56656f9798-t2756\" (UID: \"a6ff5a2f-dbc4-4445-b297-e8627bfb2d04\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756" Oct 06 21:33:14 
crc kubenswrapper[5014]: I1006 21:33:14.524651 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcdf5ea0-5d82-4417-89fa-6c37aee2916d-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-w8lt2\" (UID: \"dcdf5ea0-5d82-4417-89fa-6c37aee2916d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524682 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8mzs\" (UniqueName: \"kubernetes.io/projected/2bb77525-303a-4691-81d2-0bbeb6eeed9c-kube-api-access-h8mzs\") pod \"downloads-7954f5f757-zh4s7\" (UID: \"2bb77525-303a-4691-81d2-0bbeb6eeed9c\") " pod="openshift-console/downloads-7954f5f757-zh4s7" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524705 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a899a8b5-e5c4-456e-a469-203bbcf5445e-config\") pod \"kube-apiserver-operator-766d6c64bb-8cghq\" (UID: \"a899a8b5-e5c4-456e-a469-203bbcf5445e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524735 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hr4qv\" (UniqueName: \"kubernetes.io/projected/d1e1ed07-468a-4627-9690-ab83129e9a93-kube-api-access-hr4qv\") pod \"migrator-59844c95c7-n59hv\" (UID: \"d1e1ed07-468a-4627-9690-ab83129e9a93\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n59hv" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524759 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/a6ff5a2f-dbc4-4445-b297-e8627bfb2d04-machine-approver-tls\") pod \"machine-approver-56656f9798-t2756\" (UID: \"a6ff5a2f-dbc4-4445-b297-e8627bfb2d04\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524768 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-oauth-serving-cert\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524782 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e659c79-33c6-49d3-a333-5280ece9fa5b-config-volume\") pod \"collect-profiles-29329770-k9xbb\" (UID: \"1e659c79-33c6-49d3-a333-5280ece9fa5b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524906 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524953 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" 
(UniqueName: \"kubernetes.io/configmap/a6ff5a2f-dbc4-4445-b297-e8627bfb2d04-auth-proxy-config\") pod \"machine-approver-56656f9798-t2756\" (UID: \"a6ff5a2f-dbc4-4445-b297-e8627bfb2d04\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.524989 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/54bd74da-566f-468a-a57f-fbbd0bfd2a50-etcd-service-ca\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.525053 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/2f6a5a5c-5106-44e0-9579-ce54200179b1-tmpfs\") pod \"packageserver-d55dfcdfc-99ns4\" (UID: \"2f6a5a5c-5106-44e0-9579-ce54200179b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.525093 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ce11448-b517-411b-8bf1-8494fc43116e-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-tjwg2\" (UID: \"9ce11448-b517-411b-8bf1-8494fc43116e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.525125 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43df857e-f7f9-45e8-97e7-21adc3167678-console-oauth-config\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.525162 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.525199 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.525245 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzhrl\" (UniqueName: \"kubernetes.io/projected/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-kube-api-access-mzhrl\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.525254 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-service-ca\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " 
pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.525283 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5p2l9\" (UniqueName: \"kubernetes.io/projected/8e91dd83-2270-4ddf-bf09-d6d1b8595453-kube-api-access-5p2l9\") pod \"multus-admission-controller-857f4d67dd-6kdh6\" (UID: \"8e91dd83-2270-4ddf-bf09-d6d1b8595453\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6kdh6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.525321 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-console-config\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.525370 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-plugins-dir\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.525490 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-csi-data-dir\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.525942 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-mountpoint-dir\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.526140 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d352ce1-8de3-49ad-83b7-62f38f1864aa-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-q4xf4\" (UID: \"6d352ce1-8de3-49ad-83b7-62f38f1864aa\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.526330 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/2f6a5a5c-5106-44e0-9579-ce54200179b1-tmpfs\") pod \"packageserver-d55dfcdfc-99ns4\" (UID: \"2f6a5a5c-5106-44e0-9579-ce54200179b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.527393 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a6ff5a2f-dbc4-4445-b297-e8627bfb2d04-auth-proxy-config\") pod \"machine-approver-56656f9798-t2756\" (UID: \"a6ff5a2f-dbc4-4445-b297-e8627bfb2d04\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.528495 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6ff5a2f-dbc4-4445-b297-e8627bfb2d04-config\") pod \"machine-approver-56656f9798-t2756\" 
(UID: \"a6ff5a2f-dbc4-4445-b297-e8627bfb2d04\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.528591 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c72ed570-ae2c-4d46-92dd-69f41bda14ca-config\") pod \"kube-controller-manager-operator-78b949d7b-8mx49\" (UID: \"c72ed570-ae2c-4d46-92dd-69f41bda14ca\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.529495 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9ce11448-b517-411b-8bf1-8494fc43116e-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-tjwg2\" (UID: \"9ce11448-b517-411b-8bf1-8494fc43116e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.529778 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ce11448-b517-411b-8bf1-8494fc43116e-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-tjwg2\" (UID: \"9ce11448-b517-411b-8bf1-8494fc43116e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.530069 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1753f6a6-b8b0-4bea-9d03-a6d8539a25dd-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-dq58g\" (UID: \"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.530216 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-console-config\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.530851 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a899a8b5-e5c4-456e-a469-203bbcf5445e-config\") pod \"kube-apiserver-operator-766d6c64bb-8cghq\" (UID: \"a899a8b5-e5c4-456e-a469-203bbcf5445e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.531016 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1753f6a6-b8b0-4bea-9d03-a6d8539a25dd-service-ca-bundle\") pod \"authentication-operator-69f744f599-dq58g\" (UID: \"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.531443 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43df857e-f7f9-45e8-97e7-21adc3167678-console-serving-cert\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: 
I1006 21:33:14.532047 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/537a285f-ff1e-4ae8-954d-0ff696453b85-srv-cert\") pod \"olm-operator-6b444d44fb-gd58z\" (UID: \"537a285f-ff1e-4ae8-954d-0ff696453b85\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.532682 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/01a961cf-0904-406d-862a-027b53178111-profile-collector-cert\") pod \"catalog-operator-68c6474976-8mlsl\" (UID: \"01a961cf-0904-406d-862a-027b53178111\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.532951 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.533098 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43df857e-f7f9-45e8-97e7-21adc3167678-console-oauth-config\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.533536 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1e659c79-33c6-49d3-a333-5280ece9fa5b-secret-volume\") pod \"collect-profiles-29329770-k9xbb\" (UID: \"1e659c79-33c6-49d3-a333-5280ece9fa5b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.533869 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c72ed570-ae2c-4d46-92dd-69f41bda14ca-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8mx49\" (UID: \"c72ed570-ae2c-4d46-92dd-69f41bda14ca\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.533880 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8e91dd83-2270-4ddf-bf09-d6d1b8595453-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-6kdh6\" (UID: \"8e91dd83-2270-4ddf-bf09-d6d1b8595453\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6kdh6" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.533919 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1753f6a6-b8b0-4bea-9d03-a6d8539a25dd-serving-cert\") pod \"authentication-operator-69f744f599-dq58g\" (UID: \"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.534480 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/a6ff5a2f-dbc4-4445-b297-e8627bfb2d04-machine-approver-tls\") pod \"machine-approver-56656f9798-t2756\" (UID: \"a6ff5a2f-dbc4-4445-b297-e8627bfb2d04\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 
21:33:14.535901 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/537a285f-ff1e-4ae8-954d-0ff696453b85-profile-collector-cert\") pod \"olm-operator-6b444d44fb-gd58z\" (UID: \"537a285f-ff1e-4ae8-954d-0ff696453b85\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.539859 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5426ed16-fe90-4dc5-9a28-13d3649074e6-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-krsnh\" (UID: \"5426ed16-fe90-4dc5-9a28-13d3649074e6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.553769 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.558210 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5426ed16-fe90-4dc5-9a28-13d3649074e6-config\") pod \"openshift-apiserver-operator-796bbdcf4f-krsnh\" (UID: \"5426ed16-fe90-4dc5-9a28-13d3649074e6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.578123 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.595547 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.613390 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.616178 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6d44c7b8-2cb6-4445-afe1-de6732d5c626-metrics-tls\") pod \"ingress-operator-5b745b69d9-9vkf5\" (UID: \"6d44c7b8-2cb6-4445-afe1-de6732d5c626\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.626133 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:14 crc kubenswrapper[5014]: E1006 21:33:14.626680 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.126646135 +0000 UTC m=+140.419682879 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.627489 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:14 crc kubenswrapper[5014]: E1006 21:33:14.627920 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.127903528 +0000 UTC m=+140.420940272 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.633202 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.661192 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.663592 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6d44c7b8-2cb6-4445-afe1-de6732d5c626-trusted-ca\") pod \"ingress-operator-5b745b69d9-9vkf5\" (UID: \"6d44c7b8-2cb6-4445-afe1-de6732d5c626\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.673758 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.714059 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.728757 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:14 crc kubenswrapper[5014]: E1006 21:33:14.728912 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-10-06 21:33:15.228880931 +0000 UTC m=+140.521917705 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.729505 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:14 crc kubenswrapper[5014]: E1006 21:33:14.729947 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.229930987 +0000 UTC m=+140.522967761 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.733596 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.753457 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.773317 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.787775 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ef9faff3-6d7d-40f6-99c6-3771e940c03b-serving-cert\") pod \"service-ca-operator-777779d784-qh6cx\" (UID: \"ef9faff3-6d7d-40f6-99c6-3771e940c03b\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.793704 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.797533 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef9faff3-6d7d-40f6-99c6-3771e940c03b-config\") pod \"service-ca-operator-777779d784-qh6cx\" (UID: \"ef9faff3-6d7d-40f6-99c6-3771e940c03b\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.814140 5014 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-service-ca-operator"/"kube-root-ca.crt" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.831112 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:14 crc kubenswrapper[5014]: E1006 21:33:14.831253 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.33122409 +0000 UTC m=+140.624260854 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.831948 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:14 crc kubenswrapper[5014]: E1006 21:33:14.832382 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.33236557 +0000 UTC m=+140.625402344 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.834174 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.854069 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.870442 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/be279f3d-aa9c-47fc-8396-8ffb895fc9b6-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-pxwc5\" (UID: \"be279f3d-aa9c-47fc-8396-8ffb895fc9b6\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-pxwc5" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.873984 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.893941 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.908263 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/01a961cf-0904-406d-862a-027b53178111-srv-cert\") pod \"catalog-operator-68c6474976-8mlsl\" (UID: \"01a961cf-0904-406d-862a-027b53178111\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.913418 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.925539 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/54bd74da-566f-468a-a57f-fbbd0bfd2a50-etcd-client\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.933066 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:14 crc kubenswrapper[5014]: E1006 21:33:14.933240 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.433201828 +0000 UTC m=+140.726238602 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.933479 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.933801 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Oct 06 21:33:14 crc kubenswrapper[5014]: E1006 21:33:14.934087 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.434064467 +0000 UTC m=+140.727101241 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.954095 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.956204 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/54bd74da-566f-468a-a57f-fbbd0bfd2a50-etcd-service-ca\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.973596 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.993764 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Oct 06 21:33:14 crc kubenswrapper[5014]: I1006 21:33:14.997917 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/54bd74da-566f-468a-a57f-fbbd0bfd2a50-etcd-ca\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.014222 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.034813 5014 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.036340 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.036788 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.536746058 +0000 UTC m=+140.829782842 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.037785 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.039080 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.539063206 +0000 UTC m=+140.832099980 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.054826 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.067244 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/54bd74da-566f-468a-a57f-fbbd0bfd2a50-serving-cert\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.074931 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.095130 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.101671 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/101e93eb-cfad-49df-95ce-b6b12664dd3a-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-77xrv\" (UID: \"101e93eb-cfad-49df-95ce-b6b12664dd3a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-77xrv" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.113493 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.134704 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.140040 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.140232 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.640204545 +0000 UTC m=+140.933241309 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.140886 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.141346 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.641329073 +0000 UTC m=+140.934365837 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.153256 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.158444 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54bd74da-566f-468a-a57f-fbbd0bfd2a50-config\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.173745 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.194176 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.213469 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.235079 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.243198 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.244250 5014 nestedpendingoperations.go:348] Operation 
for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.74419743 +0000 UTC m=+141.037234174 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.251355 5014 request.go:700] Waited for 1.0067307s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-dns-operator/secrets?fieldSelector=metadata.name%3Dmetrics-tls&limit=500&resourceVersion=0 Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.253906 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.268313 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6a8033e0-8d2f-4f40-bab2-b2670cefe0b3-metrics-tls\") pod \"dns-operator-744455d44c-wqnvh\" (UID: \"6a8033e0-8d2f-4f40-bab2-b2670cefe0b3\") " pod="openshift-dns-operator/dns-operator-744455d44c-wqnvh" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.274147 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.293122 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.314571 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.334246 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.346702 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.347240 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.847212602 +0000 UTC m=+141.140249366 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.348805 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/181953a7-e119-4c51-b16f-f1ec4b63da1e-serving-cert\") pod \"console-operator-58897d9998-zjxxb\" (UID: \"181953a7-e119-4c51-b16f-f1ec4b63da1e\") " pod="openshift-console-operator/console-operator-58897d9998-zjxxb" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.354454 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.374956 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.382930 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/181953a7-e119-4c51-b16f-f1ec4b63da1e-config\") pod \"console-operator-58897d9998-zjxxb\" (UID: \"181953a7-e119-4c51-b16f-f1ec4b63da1e\") " pod="openshift-console-operator/console-operator-58897d9998-zjxxb" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.405977 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.413959 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.417231 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/181953a7-e119-4c51-b16f-f1ec4b63da1e-trusted-ca\") pod \"console-operator-58897d9998-zjxxb\" (UID: \"181953a7-e119-4c51-b16f-f1ec4b63da1e\") " pod="openshift-console-operator/console-operator-58897d9998-zjxxb" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.422799 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2f6a5a5c-5106-44e0-9579-ce54200179b1-apiservice-cert\") pod \"packageserver-d55dfcdfc-99ns4\" (UID: \"2f6a5a5c-5106-44e0-9579-ce54200179b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.422912 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2f6a5a5c-5106-44e0-9579-ce54200179b1-webhook-cert\") pod \"packageserver-d55dfcdfc-99ns4\" (UID: \"2f6a5a5c-5106-44e0-9579-ce54200179b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.435528 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.448183 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.448585 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.948532457 +0000 UTC m=+141.241569231 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.449436 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.449977 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:15.949948305 +0000 UTC m=+141.242985069 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.454487 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.456927 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e659c79-33c6-49d3-a333-5280ece9fa5b-config-volume\") pod \"collect-profiles-29329770-k9xbb\" (UID: \"1e659c79-33c6-49d3-a333-5280ece9fa5b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.474320 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.480362 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.493424 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.521534 5014 secret.go:188] Couldn't get secret openshift-ingress-canary/canary-serving-cert: failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.521579 5014 secret.go:188] Couldn't get secret openshift-kube-storage-version-migrator-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.521720 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/26e227a5-f26a-4bfb-9d4b-0f6718a234b7-cert podName:26e227a5-f26a-4bfb-9d4b-0f6718a234b7 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.02167745 +0000 UTC m=+141.314714224 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/26e227a5-f26a-4bfb-9d4b-0f6718a234b7-cert") pod "ingress-canary-f5cd4" (UID: "26e227a5-f26a-4bfb-9d4b-0f6718a234b7") : failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.521763 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dcdf5ea0-5d82-4417-89fa-6c37aee2916d-serving-cert podName:dcdf5ea0-5d82-4417-89fa-6c37aee2916d nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.021744042 +0000 UTC m=+141.314780816 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/dcdf5ea0-5d82-4417-89fa-6c37aee2916d-serving-cert") pod "kube-storage-version-migrator-operator-b67b599dd-w8lt2" (UID: "dcdf5ea0-5d82-4417-89fa-6c37aee2916d") : failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.522186 5014 secret.go:188] Couldn't get secret openshift-machine-api/machine-api-operator-tls: failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.522349 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f7160d37-518f-49e6-aee8-ce14c3267c54-machine-api-operator-tls podName:f7160d37-518f-49e6-aee8-ce14c3267c54 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.022283671 +0000 UTC m=+141.315320435 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/f7160d37-518f-49e6-aee8-ce14c3267c54-machine-api-operator-tls") pod "machine-api-operator-5694c8668f-ffz9s" (UID: "f7160d37-518f-49e6-aee8-ce14c3267c54") : failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.522657 5014 configmap.go:193] Couldn't get configMap openshift-authentication/audit: failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.522754 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-audit-policies podName:720c691e-a28e-4b39-9571-86e321399306 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.022731266 +0000 UTC m=+141.315768030 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "audit-policies" (UniqueName: "kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-audit-policies") pod "oauth-openshift-558db77b4-v74l6" (UID: "720c691e-a28e-4b39-9571-86e321399306") : failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.522805 5014 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-user-idp-0-file-data: failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.522853 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-idp-0-file-data podName:720c691e-a28e-4b39-9571-86e321399306 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.022840049 +0000 UTC m=+141.315876823 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-idp-0-file-data" (UniqueName: "kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-idp-0-file-data") pod "oauth-openshift-558db77b4-v74l6" (UID: "720c691e-a28e-4b39-9571-86e321399306") : failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.522888 5014 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-system-ocp-branding-template: failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.522919 5014 configmap.go:193] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.523001 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-ocp-branding-template podName:720c691e-a28e-4b39-9571-86e321399306 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.022973593 +0000 UTC m=+141.316010437 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-ocp-branding-template" (UniqueName: "kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-ocp-branding-template") pod "oauth-openshift-558db77b4-v74l6" (UID: "720c691e-a28e-4b39-9571-86e321399306") : failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.523124 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f7160d37-518f-49e6-aee8-ce14c3267c54-config podName:f7160d37-518f-49e6-aee8-ce14c3267c54 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.023035796 +0000 UTC m=+141.316072570 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/f7160d37-518f-49e6-aee8-ce14c3267c54-config") pod "machine-api-operator-5694c8668f-ffz9s" (UID: "f7160d37-518f-49e6-aee8-ce14c3267c54") : failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.523792 5014 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-user-template-login: failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.523870 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-login podName:720c691e-a28e-4b39-9571-86e321399306 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.023852663 +0000 UTC m=+141.316889427 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-login" (UniqueName: "kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-login") pod "oauth-openshift-558db77b4-v74l6" (UID: "720c691e-a28e-4b39-9571-86e321399306") : failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.524101 5014 configmap.go:193] Couldn't get configMap openshift-authentication/v4-0-config-system-trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.524178 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-trusted-ca-bundle podName:720c691e-a28e-4b39-9571-86e321399306 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.024160383 +0000 UTC m=+141.317197157 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-trusted-ca-bundle") pod "oauth-openshift-558db77b4-v74l6" (UID: "720c691e-a28e-4b39-9571-86e321399306") : failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.524186 5014 secret.go:188] Couldn't get secret openshift-machine-config-operator/machine-config-server-tls: failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.524830 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bda928e7-be85-4f32-be06-049b116323d9-certs podName:bda928e7-be85-4f32-be06-049b116323d9 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.024809596 +0000 UTC m=+141.317846360 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "certs" (UniqueName: "kubernetes.io/secret/bda928e7-be85-4f32-be06-049b116323d9-certs") pod "machine-config-server-6lc74" (UID: "bda928e7-be85-4f32-be06-049b116323d9") : failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.525320 5014 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-system-serving-cert: failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.525358 5014 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-user-template-error: failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.525392 5014 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-user-template-provider-selection: failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.525467 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-error podName:720c691e-a28e-4b39-9571-86e321399306 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.025441157 +0000 UTC m=+141.318478111 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-error" (UniqueName: "kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-error") pod "oauth-openshift-558db77b4-v74l6" (UID: "720c691e-a28e-4b39-9571-86e321399306") : failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.525494 5014 secret.go:188] Couldn't get secret openshift-machine-config-operator/node-bootstrapper-token: failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.525516 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-serving-cert podName:720c691e-a28e-4b39-9571-86e321399306 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.025493869 +0000 UTC m=+141.318530883 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-serving-cert" (UniqueName: "kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-serving-cert") pod "oauth-openshift-558db77b4-v74l6" (UID: "720c691e-a28e-4b39-9571-86e321399306") : failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.525552 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bda928e7-be85-4f32-be06-049b116323d9-node-bootstrap-token podName:bda928e7-be85-4f32-be06-049b116323d9 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.02553344 +0000 UTC m=+141.318570214 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "node-bootstrap-token" (UniqueName: "kubernetes.io/secret/bda928e7-be85-4f32-be06-049b116323d9-node-bootstrap-token") pod "machine-config-server-6lc74" (UID: "bda928e7-be85-4f32-be06-049b116323d9") : failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.525592 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-provider-selection podName:720c691e-a28e-4b39-9571-86e321399306 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.025574861 +0000 UTC m=+141.318611635 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-user-template-provider-selection" (UniqueName: "kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-provider-selection") pod "oauth-openshift-558db77b4-v74l6" (UID: "720c691e-a28e-4b39-9571-86e321399306") : failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.525683 5014 configmap.go:193] Couldn't get configMap openshift-machine-api/machine-api-operator-images: failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.525799 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f7160d37-518f-49e6-aee8-ce14c3267c54-images podName:f7160d37-518f-49e6-aee8-ce14c3267c54 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.025758138 +0000 UTC m=+141.318795102 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/f7160d37-518f-49e6-aee8-ce14c3267c54-images") pod "machine-api-operator-5694c8668f-ffz9s" (UID: "f7160d37-518f-49e6-aee8-ce14c3267c54") : failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.525880 5014 configmap.go:193] Couldn't get configMap openshift-dns/dns-default: failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.525944 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/86145702-4563-405c-bc92-cddd39e5e750-config-volume podName:86145702-4563-405c-bc92-cddd39e5e750 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.025924874 +0000 UTC m=+141.318961868 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/86145702-4563-405c-bc92-cddd39e5e750-config-volume") pod "dns-default-97wsg" (UID: "86145702-4563-405c-bc92-cddd39e5e750") : failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.527372 5014 configmap.go:193] Couldn't get configMap openshift-authentication/v4-0-config-system-cliconfig: failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.527464 5014 configmap.go:193] Couldn't get configMap openshift-authentication/v4-0-config-system-service-ca: failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.527488 5014 secret.go:188] Couldn't get secret openshift-dns/dns-default-metrics-tls: failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.527469 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-cliconfig podName:720c691e-a28e-4b39-9571-86e321399306 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.027440745 +0000 UTC m=+141.320477689 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-cliconfig" (UniqueName: "kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-cliconfig") pod "oauth-openshift-558db77b4-v74l6" (UID: "720c691e-a28e-4b39-9571-86e321399306") : failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.527376 5014 secret.go:188] Couldn't get secret openshift-authentication/v4-0-config-system-session: failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.527567 5014 configmap.go:193] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.527585 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-service-ca podName:720c691e-a28e-4b39-9571-86e321399306 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.027548838 +0000 UTC m=+141.320585822 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "v4-0-config-system-service-ca" (UniqueName: "kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-service-ca") pod "oauth-openshift-558db77b4-v74l6" (UID: "720c691e-a28e-4b39-9571-86e321399306") : failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.527651 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-session podName:720c691e-a28e-4b39-9571-86e321399306 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.02760514 +0000 UTC m=+141.320641904 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "v4-0-config-system-session" (UniqueName: "kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-session") pod "oauth-openshift-558db77b4-v74l6" (UID: "720c691e-a28e-4b39-9571-86e321399306") : failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.527688 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/86145702-4563-405c-bc92-cddd39e5e750-metrics-tls podName:86145702-4563-405c-bc92-cddd39e5e750 nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.027669692 +0000 UTC m=+141.320706466 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/86145702-4563-405c-bc92-cddd39e5e750-metrics-tls") pod "dns-default-97wsg" (UID: "86145702-4563-405c-bc92-cddd39e5e750") : failed to sync secret cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.527769 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/dcdf5ea0-5d82-4417-89fa-6c37aee2916d-config podName:dcdf5ea0-5d82-4417-89fa-6c37aee2916d nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.027711294 +0000 UTC m=+141.320748058 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/dcdf5ea0-5d82-4417-89fa-6c37aee2916d-config") pod "kube-storage-version-migrator-operator-b67b599dd-w8lt2" (UID: "dcdf5ea0-5d82-4417-89fa-6c37aee2916d") : failed to sync configmap cache: timed out waiting for the condition Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.534017 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.539418 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.551498 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.551713 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-10-06 21:33:16.051683454 +0000 UTC m=+141.344720228 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.552714 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.553383 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.05334288 +0000 UTC m=+141.346379664 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.566336 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.573686 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.593721 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.615513 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.633789 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.653334 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.655447 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.655713 5014 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.155672189 +0000 UTC m=+141.448708973 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.656169 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.656607 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.15659076 +0000 UTC m=+141.449627534 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.684326 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.693298 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.713715 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.733391 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.752824 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.757846 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.758011 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.257980808 +0000 UTC m=+141.551017582 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.758500 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.759089 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.259061043 +0000 UTC m=+141.552097817 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.773401 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.794164 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.813805 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.837178 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.853997 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.860044 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.860487 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.360450831 +0000 UTC m=+141.653487605 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.862574 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.362555812 +0000 UTC m=+141.655592586 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.862091 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.873913 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.894041 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.913952 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.933680 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.953892 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.965528 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.965752 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-10-06 21:33:16.465719489 +0000 UTC m=+141.758756223 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.966686 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:15 crc kubenswrapper[5014]: E1006 21:33:15.967047 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.467039644 +0000 UTC m=+141.760076378 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.974077 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Oct 06 21:33:15 crc kubenswrapper[5014]: I1006 21:33:15.994169 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.014542 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.033738 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.053886 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.067751 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:16 crc kubenswrapper[5014]: E1006 21:33:16.068047 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.567982666 +0000 UTC m=+141.861019440 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.068144 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/bda928e7-be85-4f32-be06-049b116323d9-node-bootstrap-token\") pod \"machine-config-server-6lc74\" (UID: \"bda928e7-be85-4f32-be06-049b116323d9\") " pod="openshift-machine-config-operator/machine-config-server-6lc74" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.068253 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/86145702-4563-405c-bc92-cddd39e5e750-config-volume\") pod \"dns-default-97wsg\" (UID: \"86145702-4563-405c-bc92-cddd39e5e750\") " pod="openshift-dns/dns-default-97wsg" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.068514 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.068909 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/86145702-4563-405c-bc92-cddd39e5e750-metrics-tls\") pod \"dns-default-97wsg\" (UID: \"86145702-4563-405c-bc92-cddd39e5e750\") " pod="openshift-dns/dns-default-97wsg" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.069738 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.070246 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/86145702-4563-405c-bc92-cddd39e5e750-config-volume\") pod \"dns-default-97wsg\" (UID: \"86145702-4563-405c-bc92-cddd39e5e750\") " pod="openshift-dns/dns-default-97wsg" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.071842 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcdf5ea0-5d82-4417-89fa-6c37aee2916d-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-w8lt2\" (UID: \"dcdf5ea0-5d82-4417-89fa-6c37aee2916d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.072003 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.072128 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.072172 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.072254 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcdf5ea0-5d82-4417-89fa-6c37aee2916d-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-w8lt2\" (UID: \"dcdf5ea0-5d82-4417-89fa-6c37aee2916d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.072303 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/26e227a5-f26a-4bfb-9d4b-0f6718a234b7-cert\") pod \"ingress-canary-f5cd4\" (UID: \"26e227a5-f26a-4bfb-9d4b-0f6718a234b7\") " pod="openshift-ingress-canary/ingress-canary-f5cd4" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.072524 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-audit-policies\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.072569 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/f7160d37-518f-49e6-aee8-ce14c3267c54-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-ffz9s\" (UID: \"f7160d37-518f-49e6-aee8-ce14c3267c54\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.072614 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.072693 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7160d37-518f-49e6-aee8-ce14c3267c54-config\") pod \"machine-api-operator-5694c8668f-ffz9s\" (UID: 
\"f7160d37-518f-49e6-aee8-ce14c3267c54\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.072733 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.072773 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.072870 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.072998 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.073034 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f7160d37-518f-49e6-aee8-ce14c3267c54-images\") pod \"machine-api-operator-5694c8668f-ffz9s\" (UID: \"f7160d37-518f-49e6-aee8-ce14c3267c54\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.073067 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/bda928e7-be85-4f32-be06-049b116323d9-certs\") pod \"machine-config-server-6lc74\" (UID: \"bda928e7-be85-4f32-be06-049b116323d9\") " pod="openshift-machine-config-operator/machine-config-server-6lc74" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.073109 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.073148 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.073140 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcdf5ea0-5d82-4417-89fa-6c37aee2916d-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-w8lt2\" (UID: \"dcdf5ea0-5d82-4417-89fa-6c37aee2916d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.075473 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.075878 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/86145702-4563-405c-bc92-cddd39e5e750-metrics-tls\") pod \"dns-default-97wsg\" (UID: \"86145702-4563-405c-bc92-cddd39e5e750\") " pod="openshift-dns/dns-default-97wsg" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.077130 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/bda928e7-be85-4f32-be06-049b116323d9-node-bootstrap-token\") pod \"machine-config-server-6lc74\" (UID: \"bda928e7-be85-4f32-be06-049b116323d9\") " pod="openshift-machine-config-operator/machine-config-server-6lc74" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.077271 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7160d37-518f-49e6-aee8-ce14c3267c54-config\") pod \"machine-api-operator-5694c8668f-ffz9s\" (UID: \"f7160d37-518f-49e6-aee8-ce14c3267c54\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.078315 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.079581 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-audit-policies\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: E1006 21:33:16.079876 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.579842927 +0000 UTC m=+141.872879701 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.080380 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.080567 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/bda928e7-be85-4f32-be06-049b116323d9-certs\") pod \"machine-config-server-6lc74\" (UID: \"bda928e7-be85-4f32-be06-049b116323d9\") " pod="openshift-machine-config-operator/machine-config-server-6lc74" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.080700 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.081037 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f7160d37-518f-49e6-aee8-ce14c3267c54-images\") pod \"machine-api-operator-5694c8668f-ffz9s\" (UID: \"f7160d37-518f-49e6-aee8-ce14c3267c54\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.081340 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcdf5ea0-5d82-4417-89fa-6c37aee2916d-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-w8lt2\" (UID: \"dcdf5ea0-5d82-4417-89fa-6c37aee2916d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.082674 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.084761 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/f7160d37-518f-49e6-aee8-ce14c3267c54-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-ffz9s\" (UID: \"f7160d37-518f-49e6-aee8-ce14c3267c54\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.085568 5014 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.085597 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.085764 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.088435 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.105175 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7jjh\" (UniqueName: \"kubernetes.io/projected/04070324-674e-4785-aada-ad9ffe6e89c8-kube-api-access-x7jjh\") pod \"marketplace-operator-79b997595-lts6v\" (UID: \"04070324-674e-4785-aada-ad9ffe6e89c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-lts6v" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.114614 5014 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.122892 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sz9zw\" (UniqueName: \"kubernetes.io/projected/48bcee5f-3c11-4784-aa10-c5673058c7b1-kube-api-access-sz9zw\") pod \"controller-manager-879f6c89f-pc5xx\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.133920 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.155684 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.173773 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.173874 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:16 crc kubenswrapper[5014]: E1006 21:33:16.174164 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.674123294 +0000 UTC m=+141.967160058 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.174656 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:16 crc kubenswrapper[5014]: E1006 21:33:16.175267 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.675245131 +0000 UTC m=+141.968281895 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.193291 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.214188 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.229545 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/26e227a5-f26a-4bfb-9d4b-0f6718a234b7-cert\") pod \"ingress-canary-f5cd4\" (UID: \"26e227a5-f26a-4bfb-9d4b-0f6718a234b7\") " pod="openshift-ingress-canary/ingress-canary-f5cd4" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.235178 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.250721 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.251496 5014 request.go:700] Waited for 1.840454911s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca/serviceaccounts/service-ca/token Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.276812 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:16 crc kubenswrapper[5014]: E1006 21:33:16.277927 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.77783488 +0000 UTC m=+142.070871664 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.279848 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:16 crc kubenswrapper[5014]: E1006 21:33:16.280409 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.780380845 +0000 UTC m=+142.073417619 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.285579 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjjjw\" (UniqueName: \"kubernetes.io/projected/627924fa-cfa6-404d-91a0-fbf6505ce05b-kube-api-access-jjjjw\") pod \"service-ca-9c57cc56f-wd97j\" (UID: \"627924fa-cfa6-404d-91a0-fbf6505ce05b\") " pod="openshift-service-ca/service-ca-9c57cc56f-wd97j" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.293903 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lts6v" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.303700 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-wd97j" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.308054 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsj5g\" (UniqueName: \"kubernetes.io/projected/b9aabe83-7840-47cc-b3d5-72b068737094-kube-api-access-rsj5g\") pod \"route-controller-manager-6576b87f9c-qnjng\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.327118 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6k4t\" (UniqueName: \"kubernetes.io/projected/800f445b-95a6-4098-bf5f-ea91a7ead3d0-kube-api-access-p6k4t\") pod \"router-default-5444994796-s45dn\" (UID: \"800f445b-95a6-4098-bf5f-ea91a7ead3d0\") " pod="openshift-ingress/router-default-5444994796-s45dn" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.336148 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-s45dn" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.340206 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-bound-sa-token\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.365310 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2qpv\" (UniqueName: \"kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-kube-api-access-q2qpv\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:16 crc kubenswrapper[5014]: W1006 21:33:16.370360 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod800f445b_95a6_4098_bf5f_ea91a7ead3d0.slice/crio-4b27d1f73e35937a4b2a99c2d179b579116b707a5cec700821516754079c9a55 WatchSource:0}: Error finding container 4b27d1f73e35937a4b2a99c2d179b579116b707a5cec700821516754079c9a55: Status 404 returned error can't find the container with id 4b27d1f73e35937a4b2a99c2d179b579116b707a5cec700821516754079c9a55 Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.380892 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:16 crc kubenswrapper[5014]: E1006 21:33:16.381052 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.881011747 +0000 UTC m=+142.174048531 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.381330 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:16 crc kubenswrapper[5014]: E1006 21:33:16.381933 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.881912228 +0000 UTC m=+142.174949002 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.387389 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrcn2\" (UniqueName: \"kubernetes.io/projected/7a0e9e21-4f5b-4e63-b976-1f16d01f357c-kube-api-access-qrcn2\") pod \"machine-config-controller-84d6567774-zjlcj\" (UID: \"7a0e9e21-4f5b-4e63-b976-1f16d01f357c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.401497 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.406732 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7hcb\" (UniqueName: \"kubernetes.io/projected/65d0615f-167e-462b-b846-cee104cdeec4-kube-api-access-p7hcb\") pod \"machine-config-operator-74547568cd-8pkfv\" (UID: \"65d0615f-167e-462b-b846-cee104cdeec4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.417127 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w75jv\" (UniqueName: \"kubernetes.io/projected/fbda3f6d-582e-4290-8939-e06c2f971f0e-kube-api-access-w75jv\") pod \"openshift-config-operator-7777fb866f-6dcxq\" (UID: \"fbda3f6d-582e-4290-8939-e06c2f971f0e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.434533 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5474w\" (UniqueName: \"kubernetes.io/projected/d3d88c45-8fdc-407e-a537-c45a05dedc4b-kube-api-access-5474w\") pod \"apiserver-7bbb656c7d-dk7qp\" (UID: \"d3d88c45-8fdc-407e-a537-c45a05dedc4b\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.458442 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6xrf\" (UniqueName: \"kubernetes.io/projected/a8703611-a93c-4633-8264-4c3ce3eaf77e-kube-api-access-f6xrf\") pod \"apiserver-76f77b778f-vdpkd\" (UID: \"a8703611-a93c-4633-8264-4c3ce3eaf77e\") " pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.473171 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/854e3c81-1547-47c9-b15c-2f226239301d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-kgppv\" (UID: \"854e3c81-1547-47c9-b15c-2f226239301d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.486702 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:16 crc kubenswrapper[5014]: E1006 21:33:16.487530 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:16.987514578 +0000 UTC m=+142.280551312 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.492309 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6rq5\" (UniqueName: \"kubernetes.io/projected/15ca2d23-bdd0-4951-aeb5-6544fd2e43d5-kube-api-access-w6rq5\") pod \"package-server-manager-789f6589d5-cl7xx\" (UID: \"15ca2d23-bdd0-4951-aeb5-6544fd2e43d5\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.512944 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9f5n\" (UniqueName: \"kubernetes.io/projected/854e3c81-1547-47c9-b15c-2f226239301d-kube-api-access-w9f5n\") pod \"cluster-image-registry-operator-dc59b4c8b-kgppv\" (UID: \"854e3c81-1547-47c9-b15c-2f226239301d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.529754 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vd5t\" (UniqueName: \"kubernetes.io/projected/1753f6a6-b8b0-4bea-9d03-a6d8539a25dd-kube-api-access-6vd5t\") pod \"authentication-operator-69f744f599-dq58g\" (UID: \"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.562208 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pc5xx"] Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.564458 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2jtw\" (UniqueName: \"kubernetes.io/projected/ef9faff3-6d7d-40f6-99c6-3771e940c03b-kube-api-access-w2jtw\") pod \"service-ca-operator-777779d784-qh6cx\" (UID: \"ef9faff3-6d7d-40f6-99c6-3771e940c03b\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.577074 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.589310 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:16 crc kubenswrapper[5014]: E1006 21:33:16.589657 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:17.089644519 +0000 UTC m=+142.382681253 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.595221 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dc45v\" (UniqueName: \"kubernetes.io/projected/bda928e7-be85-4f32-be06-049b116323d9-kube-api-access-dc45v\") pod \"machine-config-server-6lc74\" (UID: \"bda928e7-be85-4f32-be06-049b116323d9\") " pod="openshift-machine-config-operator/machine-config-server-6lc74" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.608067 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-wd97j"] Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.615990 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx" Oct 06 21:33:16 crc kubenswrapper[5014]: W1006 21:33:16.628290 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod627924fa_cfa6_404d_91a0_fbf6505ce05b.slice/crio-4b72974abd56724948b65d221f3f81806e4a6800978be86926e30afc4d364f1a WatchSource:0}: Error finding container 4b72974abd56724948b65d221f3f81806e4a6800978be86926e30afc4d364f1a: Status 404 returned error can't find the container with id 4b72974abd56724948b65d221f3f81806e4a6800978be86926e30afc4d364f1a Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.636410 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjv2r\" (UniqueName: \"kubernetes.io/projected/86145702-4563-405c-bc92-cddd39e5e750-kube-api-access-bjv2r\") pod \"dns-default-97wsg\" (UID: \"86145702-4563-405c-bc92-cddd39e5e750\") " pod="openshift-dns/dns-default-97wsg" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.640514 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6wrq\" (UniqueName: \"kubernetes.io/projected/1e659c79-33c6-49d3-a333-5280ece9fa5b-kube-api-access-k6wrq\") pod \"collect-profiles-29329770-k9xbb\" (UID: \"1e659c79-33c6-49d3-a333-5280ece9fa5b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.647348 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sczfk\" (UniqueName: \"kubernetes.io/projected/537a285f-ff1e-4ae8-954d-0ff696453b85-kube-api-access-sczfk\") pod \"olm-operator-6b444d44fb-gd58z\" (UID: \"537a285f-ff1e-4ae8-954d-0ff696453b85\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.652207 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.663143 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xr2lm\" (UniqueName: \"kubernetes.io/projected/54bd74da-566f-468a-a57f-fbbd0bfd2a50-kube-api-access-xr2lm\") pod \"etcd-operator-b45778765-zvv9h\" (UID: \"54bd74da-566f-468a-a57f-fbbd0bfd2a50\") " pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.666205 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lts6v"] Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.679758 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.680922 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.681136 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.689325 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.690222 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:16 crc kubenswrapper[5014]: E1006 21:33:16.691312 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:17.191018206 +0000 UTC m=+142.484054940 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.695224 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7nsg\" (UniqueName: \"kubernetes.io/projected/43df857e-f7f9-45e8-97e7-21adc3167678-kube-api-access-m7nsg\") pod \"console-f9d7485db-kb2p5\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.703751 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmx4v\" (UniqueName: \"kubernetes.io/projected/6a8033e0-8d2f-4f40-bab2-b2670cefe0b3-kube-api-access-xmx4v\") pod \"dns-operator-744455d44c-wqnvh\" (UID: \"6a8033e0-8d2f-4f40-bab2-b2670cefe0b3\") " pod="openshift-dns-operator/dns-operator-744455d44c-wqnvh" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.725181 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gc5pq\" (UniqueName: \"kubernetes.io/projected/2f6a5a5c-5106-44e0-9579-ce54200179b1-kube-api-access-gc5pq\") pod \"packageserver-d55dfcdfc-99ns4\" (UID: \"2f6a5a5c-5106-44e0-9579-ce54200179b1\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.727353 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zw594\" (UniqueName: \"kubernetes.io/projected/720c691e-a28e-4b39-9571-86e321399306-kube-api-access-zw594\") pod \"oauth-openshift-558db77b4-v74l6\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.731315 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng"] Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.736676 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-6lc74" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.743252 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-97wsg" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.747610 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwwbj\" (UniqueName: \"kubernetes.io/projected/01a961cf-0904-406d-862a-027b53178111-kube-api-access-vwwbj\") pod \"catalog-operator-68c6474976-8mlsl\" (UID: \"01a961cf-0904-406d-862a-027b53178111\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.758155 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.764742 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.774192 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kp7kg\" (UniqueName: \"kubernetes.io/projected/f7160d37-518f-49e6-aee8-ce14c3267c54-kube-api-access-kp7kg\") pod \"machine-api-operator-5694c8668f-ffz9s\" (UID: \"f7160d37-518f-49e6-aee8-ce14c3267c54\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.780233 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.791995 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:16 crc kubenswrapper[5014]: E1006 21:33:16.792579 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:17.292565649 +0000 UTC m=+142.585602383 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.795952 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvqlf\" (UniqueName: \"kubernetes.io/projected/be279f3d-aa9c-47fc-8396-8ffb895fc9b6-kube-api-access-cvqlf\") pod \"cluster-samples-operator-665b6dd947-pxwc5\" (UID: \"be279f3d-aa9c-47fc-8396-8ffb895fc9b6\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-pxwc5" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.796445 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx"] Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.802004 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.808395 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-pxwc5" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.815388 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c72ed570-ae2c-4d46-92dd-69f41bda14ca-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8mx49\" (UID: \"c72ed570-ae2c-4d46-92dd-69f41bda14ca\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.836951 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6424j\" (UniqueName: \"kubernetes.io/projected/dcdf5ea0-5d82-4417-89fa-6c37aee2916d-kube-api-access-6424j\") pod \"kube-storage-version-migrator-operator-b67b599dd-w8lt2\" (UID: \"dcdf5ea0-5d82-4417-89fa-6c37aee2916d\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.848168 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.849533 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9xgt\" (UniqueName: \"kubernetes.io/projected/a6ff5a2f-dbc4-4445-b297-e8627bfb2d04-kube-api-access-b9xgt\") pod \"machine-approver-56656f9798-t2756\" (UID: \"a6ff5a2f-dbc4-4445-b297-e8627bfb2d04\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.856891 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp"] Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.873660 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6sdbx\" (UniqueName: \"kubernetes.io/projected/9ce11448-b517-411b-8bf1-8494fc43116e-kube-api-access-6sdbx\") pod \"openshift-controller-manager-operator-756b6f6bc6-tjwg2\" (UID: \"9ce11448-b517-411b-8bf1-8494fc43116e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.890027 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-wqnvh" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.892995 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:16 crc kubenswrapper[5014]: E1006 21:33:16.894306 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:17.394268356 +0000 UTC m=+142.687305090 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.895683 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:16 crc kubenswrapper[5014]: E1006 21:33:16.896094 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:17.396080388 +0000 UTC m=+142.689117122 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.900916 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95m8l\" (UniqueName: \"kubernetes.io/projected/5426ed16-fe90-4dc5-9a28-13d3649074e6-kube-api-access-95m8l\") pod \"openshift-apiserver-operator-796bbdcf4f-krsnh\" (UID: \"5426ed16-fe90-4dc5-9a28-13d3649074e6\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.913075 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.914476 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-vdpkd"] Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.915035 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a899a8b5-e5c4-456e-a469-203bbcf5445e-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-8cghq\" (UID: \"a899a8b5-e5c4-456e-a469-203bbcf5445e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.925363 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.930348 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5rs5\" (UniqueName: \"kubernetes.io/projected/6d44c7b8-2cb6-4445-afe1-de6732d5c626-kube-api-access-r5rs5\") pod \"ingress-operator-5b745b69d9-9vkf5\" (UID: \"6d44c7b8-2cb6-4445-afe1-de6732d5c626\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.952408 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6d352ce1-8de3-49ad-83b7-62f38f1864aa-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-q4xf4\" (UID: \"6d352ce1-8de3-49ad-83b7-62f38f1864aa\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.962422 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.970024 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6d44c7b8-2cb6-4445-afe1-de6732d5c626-bound-sa-token\") pod \"ingress-operator-5b745b69d9-9vkf5\" (UID: \"6d44c7b8-2cb6-4445-afe1-de6732d5c626\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.989451 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzhrl\" (UniqueName: \"kubernetes.io/projected/09eaec7e-dd40-43f9-b1dd-4ef2de9b0036-kube-api-access-mzhrl\") pod \"csi-hostpathplugin-k27df\" (UID: \"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036\") " pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:16 crc kubenswrapper[5014]: I1006 21:33:16.991333 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv"] Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.000048 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.000266 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:17.500231938 +0000 UTC m=+142.793268682 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.000350 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.000677 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:17.500670333 +0000 UTC m=+142.793707077 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.004918 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.007856 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.013787 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.019765 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.026928 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.030890 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpznr\" (UniqueName: \"kubernetes.io/projected/101e93eb-cfad-49df-95ce-b6b12664dd3a-kube-api-access-gpznr\") pod \"control-plane-machine-set-operator-78cbb6b69f-77xrv\" (UID: \"101e93eb-cfad-49df-95ce-b6b12664dd3a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-77xrv" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.031437 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnbg7\" (UniqueName: \"kubernetes.io/projected/26e227a5-f26a-4bfb-9d4b-0f6718a234b7-kube-api-access-cnbg7\") pod \"ingress-canary-f5cd4\" (UID: \"26e227a5-f26a-4bfb-9d4b-0f6718a234b7\") " pod="openshift-ingress-canary/ingress-canary-f5cd4" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.033384 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.045547 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.047735 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5p2l9\" (UniqueName: \"kubernetes.io/projected/8e91dd83-2270-4ddf-bf09-d6d1b8595453-kube-api-access-5p2l9\") pod \"multus-admission-controller-857f4d67dd-6kdh6\" (UID: \"8e91dd83-2270-4ddf-bf09-d6d1b8595453\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6kdh6" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.052210 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.066419 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-k27df" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.070489 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6txwk\" (UniqueName: \"kubernetes.io/projected/181953a7-e119-4c51-b16f-f1ec4b63da1e-kube-api-access-6txwk\") pod \"console-operator-58897d9998-zjxxb\" (UID: \"181953a7-e119-4c51-b16f-f1ec4b63da1e\") " pod="openshift-console-operator/console-operator-58897d9998-zjxxb" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.070743 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-6kdh6" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.074320 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-f5cd4" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.092021 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.093813 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8mzs\" (UniqueName: \"kubernetes.io/projected/2bb77525-303a-4691-81d2-0bbeb6eeed9c-kube-api-access-h8mzs\") pod \"downloads-7954f5f757-zh4s7\" (UID: \"2bb77525-303a-4691-81d2-0bbeb6eeed9c\") " pod="openshift-console/downloads-7954f5f757-zh4s7" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.095800 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.102583 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.102736 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:17.602711322 +0000 UTC m=+142.895748056 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.102953 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.103315 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:17.603308552 +0000 UTC m=+142.896345286 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.107018 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hr4qv\" (UniqueName: \"kubernetes.io/projected/d1e1ed07-468a-4627-9690-ab83129e9a93-kube-api-access-hr4qv\") pod \"migrator-59844c95c7-n59hv\" (UID: \"d1e1ed07-468a-4627-9690-ab83129e9a93\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n59hv" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.175068 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-77xrv" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.179542 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n59hv" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.202097 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-zjxxb" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.204721 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.204857 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:17.704838555 +0000 UTC m=+142.997875289 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.204934 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.205249 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-10-06 21:33:17.705241958 +0000 UTC m=+142.998278692 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.306734 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-97wsg"] Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.306779 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.306868 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:17.806850453 +0000 UTC m=+143.099887187 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.307063 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.307695 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:17.807682571 +0000 UTC m=+143.100719305 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.340494 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-6lc74" event={"ID":"bda928e7-be85-4f32-be06-049b116323d9","Type":"ContainerStarted","Data":"f932790cb77242dcc951b4e71a7d60fcef3b83b1d1cf7372d02ebb9fc308edcc"} Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.341016 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-6lc74" event={"ID":"bda928e7-be85-4f32-be06-049b116323d9","Type":"ContainerStarted","Data":"a8b4de967a4c3f56762c145738ecb32f032eb163398b53f89024c9e60939e42e"} Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.342030 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" event={"ID":"d3d88c45-8fdc-407e-a537-c45a05dedc4b","Type":"ContainerStarted","Data":"551486e20b681345a141c8351954ecf564de4e99dcdaa7cacaa3a54704c03c35"} Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.356859 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" event={"ID":"b9aabe83-7840-47cc-b3d5-72b068737094","Type":"ContainerStarted","Data":"b6a6bf77573e8fb450b57b5302fb6348e423ca3170de1ac758713bce2e4db8af"} Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.356916 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" event={"ID":"b9aabe83-7840-47cc-b3d5-72b068737094","Type":"ContainerStarted","Data":"a8b8cf872fee18dc7142474b9a0fb885937b93ab294b29854beaf1f4fa3ad81f"} Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.357124 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.359664 5014 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-qnjng container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.359702 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" podUID="b9aabe83-7840-47cc-b3d5-72b068737094" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": dial tcp 10.217.0.14:8443: connect: connection refused" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.361307 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lts6v" event={"ID":"04070324-674e-4785-aada-ad9ffe6e89c8","Type":"ContainerStarted","Data":"a63b374e2b15c0781f81854b965d1c222a498df823d168b9df2eff34fbf21c26"} Oct 06 21:33:17 crc kubenswrapper[5014]: 
I1006 21:33:17.361333 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lts6v" event={"ID":"04070324-674e-4785-aada-ad9ffe6e89c8","Type":"ContainerStarted","Data":"e2bc7670257cca0141a182589eafca524f48097d35760fb351f9937204e8533f"} Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.361633 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-lts6v" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.363980 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" event={"ID":"a8703611-a93c-4633-8264-4c3ce3eaf77e","Type":"ContainerStarted","Data":"88dbb8245c3fd478e4ba883e0ed3a14af9e3c50bc76154fc2e5b013bc4b39092"} Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.375781 5014 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lts6v container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/healthz\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.377723 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-s45dn" event={"ID":"800f445b-95a6-4098-bf5f-ea91a7ead3d0","Type":"ContainerStarted","Data":"ce40542e401464666543db28d1d6e501385ea483a5751e399f17de1265884594"} Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.375840 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-lts6v" podUID="04070324-674e-4785-aada-ad9ffe6e89c8" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.6:8080/healthz\": dial tcp 10.217.0.6:8080: connect: connection refused" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.378247 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-s45dn" event={"ID":"800f445b-95a6-4098-bf5f-ea91a7ead3d0","Type":"ContainerStarted","Data":"4b27d1f73e35937a4b2a99c2d179b579116b707a5cec700821516754079c9a55"} Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.380643 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx" event={"ID":"15ca2d23-bdd0-4951-aeb5-6544fd2e43d5","Type":"ContainerStarted","Data":"7d3b079f32b920b9c2f97207c0a17e47c006d8d8d802e14d4f1983db9e3b7fa1"} Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.384933 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-zh4s7" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.388715 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-wd97j" event={"ID":"627924fa-cfa6-404d-91a0-fbf6505ce05b","Type":"ContainerStarted","Data":"c9901515144bd9c3fec674406e684b49850c10baf181cd44212612bd6c35436d"} Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.388774 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-wd97j" event={"ID":"627924fa-cfa6-404d-91a0-fbf6505ce05b","Type":"ContainerStarted","Data":"4b72974abd56724948b65d221f3f81806e4a6800978be86926e30afc4d364f1a"} Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.393753 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv" event={"ID":"854e3c81-1547-47c9-b15c-2f226239301d","Type":"ContainerStarted","Data":"e744dea11c69d88b32bbe9bfbe5b7aaa2a9f5a9b267147ea95cc8256c3803443"} Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.398820 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx" event={"ID":"48bcee5f-3c11-4784-aa10-c5673058c7b1","Type":"ContainerStarted","Data":"8833357ffa2c7fb32669aac8e4012e6f38da5819aadb86f5199d6e219bd5d726"} Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.398864 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx" event={"ID":"48bcee5f-3c11-4784-aa10-c5673058c7b1","Type":"ContainerStarted","Data":"2f0677a493954a44b9500b2756251874b0926c0f04bc551033d184536df17f54"} Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.399241 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.405302 5014 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-pc5xx container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.405362 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx" podUID="48bcee5f-3c11-4784-aa10-c5673058c7b1" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.424724 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.425026 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:17.925000277 +0000 UTC m=+143.218037011 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.425242 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.428385 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:17.92837477 +0000 UTC m=+143.221411504 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.440846 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj"] Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.450652 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv"] Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.469434 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq"] Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.527641 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.528015 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:18.027992067 +0000 UTC m=+143.321028801 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.528189 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.531872 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:18.031860708 +0000 UTC m=+143.324897442 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.632382 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.632785 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:18.13276771 +0000 UTC m=+143.425804474 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.736542 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.737223 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:18.23721103 +0000 UTC m=+143.530247764 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.770335 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-kb2p5"] Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.776165 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z"] Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.792131 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-dq58g"] Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.837488 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.837917 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:18.337900503 +0000 UTC m=+143.630937237 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.867464 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.942014 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:17 crc kubenswrapper[5014]: E1006 21:33:17.943055 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:18.443039967 +0000 UTC m=+143.736076701 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:17 crc kubenswrapper[5014]: I1006 21:33:17.987700 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-lts6v" podStartSLOduration=121.987684556 podStartE2EDuration="2m1.987684556s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:17.94368852 +0000 UTC m=+143.236725264" watchObservedRunningTime="2025-10-06 21:33:17.987684556 +0000 UTC m=+143.280721290" Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.043223 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:18 crc kubenswrapper[5014]: E1006 21:33:18.043567 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:18.543539264 +0000 UTC m=+143.836575998 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.044491 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:18 crc kubenswrapper[5014]: E1006 21:33:18.045691 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:18.545675486 +0000 UTC m=+143.838712220 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.150610 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" podStartSLOduration=122.150591093 podStartE2EDuration="2m2.150591093s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:18.145669347 +0000 UTC m=+143.438706081" watchObservedRunningTime="2025-10-06 21:33:18.150591093 +0000 UTC m=+143.443627827" Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.154261 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:18 crc kubenswrapper[5014]: E1006 21:33:18.154442 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:18.654406912 +0000 UTC m=+143.947443656 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.156543 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:18 crc kubenswrapper[5014]: E1006 21:33:18.161310 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:18.661099539 +0000 UTC m=+143.954136283 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.259487 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:18 crc kubenswrapper[5014]: E1006 21:33:18.259783 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:18.759740513 +0000 UTC m=+144.052777247 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.260033 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:18 crc kubenswrapper[5014]: E1006 21:33:18.260453 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:18.760433746 +0000 UTC m=+144.053470690 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.308369 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-wd97j" podStartSLOduration=122.308329374 podStartE2EDuration="2m2.308329374s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:18.307176046 +0000 UTC m=+143.600212780" watchObservedRunningTime="2025-10-06 21:33:18.308329374 +0000 UTC m=+143.601366118" Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.339268 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-s45dn" Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.354557 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:18 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:18 crc kubenswrapper[5014]: [+]process-running ok Oct 06 21:33:18 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.354710 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.362683 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:18 crc kubenswrapper[5014]: E1006 21:33:18.363032 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:18.863013903 +0000 UTC m=+144.156050637 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.367915 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx" podStartSLOduration=122.367893548 podStartE2EDuration="2m2.367893548s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:18.361791172 +0000 UTC m=+143.654827926" watchObservedRunningTime="2025-10-06 21:33:18.367893548 +0000 UTC m=+143.660930272" Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.415412 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq" event={"ID":"fbda3f6d-582e-4290-8939-e06c2f971f0e","Type":"ContainerStarted","Data":"0f7951b0697bfed8319f57d955b0a2ab91983ed41776a45d1cf2d6d896dc9071"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.415451 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq" event={"ID":"fbda3f6d-582e-4290-8939-e06c2f971f0e","Type":"ContainerStarted","Data":"73e711278eeffe3ccfc8d60fd352e7fbab3bcd32c783bd22c99d0151beae4b48"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.440974 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx" event={"ID":"15ca2d23-bdd0-4951-aeb5-6544fd2e43d5","Type":"ContainerStarted","Data":"66fdc43ea4c05aa0c5d5c24942908a4be1b89146491038656d888060175b344d"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.441060 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx" event={"ID":"15ca2d23-bdd0-4951-aeb5-6544fd2e43d5","Type":"ContainerStarted","Data":"362e6b51f44dc84649068e84e90e6710407a57d9ad36968048e5faf159f401db"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.442068 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx" Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.455240 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-zvv9h"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.457003 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj" event={"ID":"7a0e9e21-4f5b-4e63-b976-1f16d01f357c","Type":"ContainerStarted","Data":"fa016d8d28af868aae326016dec1f28784c24ffc0317029051df3cc1e26c0287"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.457034 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj" event={"ID":"7a0e9e21-4f5b-4e63-b976-1f16d01f357c","Type":"ContainerStarted","Data":"97b2d6e213504d53cc588b70dc60558b0a5b9ff4f14610db2b18257e64c309dc"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.470957 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:18 crc kubenswrapper[5014]: E1006 21:33:18.471253 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:18.971242002 +0000 UTC m=+144.264278736 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.476412 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z" event={"ID":"537a285f-ff1e-4ae8-954d-0ff696453b85","Type":"ContainerStarted","Data":"97235df2f8faab6133a76f165f9b03d868ea2103aafd88f3fa3e6d2ee3924146"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.486266 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.491048 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" event={"ID":"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd","Type":"ContainerStarted","Data":"2bff13ce63348f51c78c6412349696544c34dc2740826dd0b7d66aff67c63266"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.497050 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv" event={"ID":"65d0615f-167e-462b-b846-cee104cdeec4","Type":"ContainerStarted","Data":"1bd79d2865cdcd1b75ed4dae8378b44bf04a940dd9fa37ac00bbdb38a71676da"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.497097 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv" event={"ID":"65d0615f-167e-462b-b846-cee104cdeec4","Type":"ContainerStarted","Data":"a552d268315099f3f9ce4d18874e841e846fb10d797baed34bc132746b8e0d0a"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.497408 5014 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-v74l6"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.501484 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv" event={"ID":"854e3c81-1547-47c9-b15c-2f226239301d","Type":"ContainerStarted","Data":"28ccc13318f01b454799b48b5e106df096eb440eaef67214d20a973d235d7674"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.502920 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-pxwc5"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.504015 5014 generic.go:334] "Generic (PLEG): container finished" podID="d3d88c45-8fdc-407e-a537-c45a05dedc4b" containerID="9b4705ae7ef31c6d404f25ef57859f26eed867665a7368a3c1594e335e47df31" exitCode=0 Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.506340 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" event={"ID":"d3d88c45-8fdc-407e-a537-c45a05dedc4b","Type":"ContainerDied","Data":"9b4705ae7ef31c6d404f25ef57859f26eed867665a7368a3c1594e335e47df31"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.519748 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-97wsg" event={"ID":"86145702-4563-405c-bc92-cddd39e5e750","Type":"ContainerStarted","Data":"2f3e518bb584947ae9532bd6e3953b5c2ebb31d72fa085fa94b9b701b18f89ef"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.523522 5014 generic.go:334] "Generic (PLEG): container finished" podID="a8703611-a93c-4633-8264-4c3ce3eaf77e" containerID="4cae606262bfc672ba99426e57623e7813f1e3d34a548ff8256af4c2141d39e6" exitCode=0 Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.523576 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" event={"ID":"a8703611-a93c-4633-8264-4c3ce3eaf77e","Type":"ContainerDied","Data":"4cae606262bfc672ba99426e57623e7813f1e3d34a548ff8256af4c2141d39e6"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.525116 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756" event={"ID":"a6ff5a2f-dbc4-4445-b297-e8627bfb2d04","Type":"ContainerStarted","Data":"57e38f87e6dc3e73ec0348f2d263a5c0b89946f078b5b36930dca63b5914e4d7"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.525471 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756" event={"ID":"a6ff5a2f-dbc4-4445-b297-e8627bfb2d04","Type":"ContainerStarted","Data":"8dc5341cfab9df0ac8fc13e9aabace37413ba87f872fc25d6765e9e2c4a7e44d"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.526406 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-kb2p5" event={"ID":"43df857e-f7f9-45e8-97e7-21adc3167678","Type":"ContainerStarted","Data":"56a557245460d4db4647f6ddf232ee62107e90c59362f0e7708b594aa6ce5849"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.526473 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-kb2p5" event={"ID":"43df857e-f7f9-45e8-97e7-21adc3167678","Type":"ContainerStarted","Data":"a64cfac1ca85d19f95c45f010bd9cff708b14311ff1f5c5dc57021467f8cbd50"} Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.528077 5014 patch_prober.go:28] interesting 
pod/controller-manager-879f6c89f-pc5xx container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.528115 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx" podUID="48bcee5f-3c11-4784-aa10-c5673058c7b1" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.528179 5014 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-lts6v container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/healthz\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.528226 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-lts6v" podUID="04070324-674e-4785-aada-ad9ffe6e89c8" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.6:8080/healthz\": dial tcp 10.217.0.6:8080: connect: connection refused" Oct 06 21:33:18 crc kubenswrapper[5014]: W1006 21:33:18.549783 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef9faff3_6d7d_40f6_99c6_3771e940c03b.slice/crio-465cdacbffcd67ac3aa52ea7ffc38c9c4600d242dd587b9aaa7d738c026c1e71 WatchSource:0}: Error finding container 465cdacbffcd67ac3aa52ea7ffc38c9c4600d242dd587b9aaa7d738c026c1e71: Status 404 returned error can't find the container with id 465cdacbffcd67ac3aa52ea7ffc38c9c4600d242dd587b9aaa7d738c026c1e71 Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.552337 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-s45dn" podStartSLOduration=122.552319632 podStartE2EDuration="2m2.552319632s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:18.550537082 +0000 UTC m=+143.843573816" watchObservedRunningTime="2025-10-06 21:33:18.552319632 +0000 UTC m=+143.845356366" Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.572588 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:18 crc kubenswrapper[5014]: E1006 21:33:18.572877 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:19.072851977 +0000 UTC m=+144.365888711 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.573260 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:18 crc kubenswrapper[5014]: E1006 21:33:18.575932 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:19.07591301 +0000 UTC m=+144.368949734 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.674738 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:18 crc kubenswrapper[5014]: E1006 21:33:18.675146 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:19.175108853 +0000 UTC m=+144.468145587 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.675496 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:18 crc kubenswrapper[5014]: E1006 21:33:18.678832 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:19.178820678 +0000 UTC m=+144.471857412 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.711363 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-6kdh6"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.724301 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.748488 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-ffz9s"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.778078 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.778381 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:18 crc kubenswrapper[5014]: E1006 21:33:18.779663 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:19.279640956 +0000 UTC m=+144.572677690 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.800809 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zjxxb"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.824143 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-wqnvh"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.824195 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.881058 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:18 crc kubenswrapper[5014]: E1006 21:33:18.881558 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:19.381540861 +0000 UTC m=+144.674577595 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:18 crc kubenswrapper[5014]: W1006 21:33:18.891472 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod181953a7_e119_4c51_b16f_f1ec4b63da1e.slice/crio-89e867b7131d3a84b0465d25a13043a6126773735e16842f091ad0b82008b68d WatchSource:0}: Error finding container 89e867b7131d3a84b0465d25a13043a6126773735e16842f091ad0b82008b68d: Status 404 returned error can't find the container with id 89e867b7131d3a84b0465d25a13043a6126773735e16842f091ad0b82008b68d Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.894149 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-k27df"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.903094 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.919885 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.919938 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.932382 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2"] Oct 06 21:33:18 crc kubenswrapper[5014]: W1006 21:33:18.942855 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod01a961cf_0904_406d_862a_027b53178111.slice/crio-d995ee938fd8400d28d244ba95368c9e13142071ef0711efa1efd72c6de1622b WatchSource:0}: Error finding container d995ee938fd8400d28d244ba95368c9e13142071ef0711efa1efd72c6de1622b: Status 404 returned error can't find the container with id d995ee938fd8400d28d244ba95368c9e13142071ef0711efa1efd72c6de1622b Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.957128 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-77xrv"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.961900 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.962837 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-n59hv"] Oct 06 21:33:18 crc kubenswrapper[5014]: I1006 21:33:18.990983 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:18 crc kubenswrapper[5014]: E1006 
21:33:18.991293 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:19.49127862 +0000 UTC m=+144.784315354 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.001684 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh"] Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.004591 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq"] Oct 06 21:33:19 crc kubenswrapper[5014]: W1006 21:33:19.018851 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d352ce1_8de3_49ad_83b7_62f38f1864aa.slice/crio-fd8e562189ab0784be3a8b7fc2a09b2c03671719d3bac94c98f242ca8a24a575 WatchSource:0}: Error finding container fd8e562189ab0784be3a8b7fc2a09b2c03671719d3bac94c98f242ca8a24a575: Status 404 returned error can't find the container with id fd8e562189ab0784be3a8b7fc2a09b2c03671719d3bac94c98f242ca8a24a575 Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.018875 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49"] Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.019998 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-zh4s7"] Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.037314 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-f5cd4"] Oct 06 21:33:19 crc kubenswrapper[5014]: W1006 21:33:19.043822 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9ce11448_b517_411b_8bf1_8494fc43116e.slice/crio-32602f663f29f404f1d6327564cc09e3abc57321e7c5c675d05315cd25ef8927 WatchSource:0}: Error finding container 32602f663f29f404f1d6327564cc09e3abc57321e7c5c675d05315cd25ef8927: Status 404 returned error can't find the container with id 32602f663f29f404f1d6327564cc09e3abc57321e7c5c675d05315cd25ef8927 Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.092540 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:19 crc kubenswrapper[5014]: E1006 21:33:19.092871 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-10-06 21:33:19.592859224 +0000 UTC m=+144.885895958 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.098199 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-6lc74" podStartSLOduration=5.098179384 podStartE2EDuration="5.098179384s" podCreationTimestamp="2025-10-06 21:33:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:19.090337658 +0000 UTC m=+144.383374412" watchObservedRunningTime="2025-10-06 21:33:19.098179384 +0000 UTC m=+144.391216118" Oct 06 21:33:19 crc kubenswrapper[5014]: W1006 21:33:19.100061 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d44c7b8_2cb6_4445_afe1_de6732d5c626.slice/crio-8e7ff107c57f0ca07186e8049e1a5ef895202a18247ab038c05abee9267b039b WatchSource:0}: Error finding container 8e7ff107c57f0ca07186e8049e1a5ef895202a18247ab038c05abee9267b039b: Status 404 returned error can't find the container with id 8e7ff107c57f0ca07186e8049e1a5ef895202a18247ab038c05abee9267b039b Oct 06 21:33:19 crc kubenswrapper[5014]: W1006 21:33:19.148298 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc72ed570_ae2c_4d46_92dd_69f41bda14ca.slice/crio-5d0694eef67e5eb6b0a6a800807c3eddb3f7f6dad3d915403efdda9a0825c533 WatchSource:0}: Error finding container 5d0694eef67e5eb6b0a6a800807c3eddb3f7f6dad3d915403efdda9a0825c533: Status 404 returned error can't find the container with id 5d0694eef67e5eb6b0a6a800807c3eddb3f7f6dad3d915403efdda9a0825c533 Oct 06 21:33:19 crc kubenswrapper[5014]: W1006 21:33:19.177832 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26e227a5_f26a_4bfb_9d4b_0f6718a234b7.slice/crio-d302ac3bf641eb83ad36797c26c88a2feebe9717705bb0da4b069873e7259a59 WatchSource:0}: Error finding container d302ac3bf641eb83ad36797c26c88a2feebe9717705bb0da4b069873e7259a59: Status 404 returned error can't find the container with id d302ac3bf641eb83ad36797c26c88a2feebe9717705bb0da4b069873e7259a59 Oct 06 21:33:19 crc kubenswrapper[5014]: W1006 21:33:19.178510 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda899a8b5_e5c4_456e_a469_203bbcf5445e.slice/crio-729853d1d6875f377e0aa548b794cfeee34aaf8d24546cf1bf3e0ed565357c02 WatchSource:0}: Error finding container 729853d1d6875f377e0aa548b794cfeee34aaf8d24546cf1bf3e0ed565357c02: Status 404 returned error can't find the container with id 729853d1d6875f377e0aa548b794cfeee34aaf8d24546cf1bf3e0ed565357c02 Oct 06 21:33:19 crc kubenswrapper[5014]: W1006 21:33:19.187071 5014 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2bb77525_303a_4691_81d2_0bbeb6eeed9c.slice/crio-13c87176082c047e0990a21657cc501e38ee40d3dace3fa97f55078e84e0e38d WatchSource:0}: Error finding container 13c87176082c047e0990a21657cc501e38ee40d3dace3fa97f55078e84e0e38d: Status 404 returned error can't find the container with id 13c87176082c047e0990a21657cc501e38ee40d3dace3fa97f55078e84e0e38d Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.193482 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:19 crc kubenswrapper[5014]: E1006 21:33:19.193771 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:19.693756475 +0000 UTC m=+144.986793209 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.221730 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-kb2p5" podStartSLOduration=123.221693688 podStartE2EDuration="2m3.221693688s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:19.21256134 +0000 UTC m=+144.505598084" watchObservedRunningTime="2025-10-06 21:33:19.221693688 +0000 UTC m=+144.514730422" Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.294968 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:19 crc kubenswrapper[5014]: E1006 21:33:19.295371 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:19.795359909 +0000 UTC m=+145.088396643 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.358050 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:19 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:19 crc kubenswrapper[5014]: [+]process-running ok Oct 06 21:33:19 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.358132 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.396271 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:19 crc kubenswrapper[5014]: E1006 21:33:19.396763 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:19.896745256 +0000 UTC m=+145.189781990 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.490493 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kgppv" podStartSLOduration=123.490470354 podStartE2EDuration="2m3.490470354s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:19.415281283 +0000 UTC m=+144.708318027" watchObservedRunningTime="2025-10-06 21:33:19.490470354 +0000 UTC m=+144.783507088" Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.498557 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:19 crc kubenswrapper[5014]: E1006 21:33:19.499737 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:19.999720317 +0000 UTC m=+145.292757051 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.533476 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx" podStartSLOduration=123.533453247 podStartE2EDuration="2m3.533453247s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:19.530676443 +0000 UTC m=+144.823713177" watchObservedRunningTime="2025-10-06 21:33:19.533453247 +0000 UTC m=+144.826489981" Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.569882 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" event={"ID":"1753f6a6-b8b0-4bea-9d03-a6d8539a25dd","Type":"ContainerStarted","Data":"640749c39b87a6065d170a5e060e08fad1eeb81548982d88a63bc8989c46719c"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.600773 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-zh4s7" event={"ID":"2bb77525-303a-4691-81d2-0bbeb6eeed9c","Type":"ContainerStarted","Data":"13c87176082c047e0990a21657cc501e38ee40d3dace3fa97f55078e84e0e38d"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.601333 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:19 crc kubenswrapper[5014]: E1006 21:33:19.601767 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:20.101752345 +0000 UTC m=+145.394789079 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.624781 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-k27df" event={"ID":"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036","Type":"ContainerStarted","Data":"c4519c6bb0b713a5508f740e744dafbf5b635eea51a26a83275680f09fed2c73"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.683069 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" event={"ID":"d3d88c45-8fdc-407e-a537-c45a05dedc4b","Type":"ContainerStarted","Data":"de4a958c12268e0168e79c18471eccd3af39635de343a7277f358c221e9e0488"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.703371 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:19 crc kubenswrapper[5014]: E1006 21:33:19.703706 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:20.203694581 +0000 UTC m=+145.496731315 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.723397 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" event={"ID":"54bd74da-566f-468a-a57f-fbbd0bfd2a50","Type":"ContainerStarted","Data":"ab2aca1cae0250856f3a19062b6126e01c9e2db6c6e85445244122bd474bf686"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.723447 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" event={"ID":"54bd74da-566f-468a-a57f-fbbd0bfd2a50","Type":"ContainerStarted","Data":"1ece1a1f22f76d0f63fd8085fd20d1185c2d06eaf0a6cb70cd71642c5dab3e93"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.741411 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" event={"ID":"a8703611-a93c-4633-8264-4c3ce3eaf77e","Type":"ContainerStarted","Data":"79adb3cb6ba2f2fe5295d34677fb6e643103152affffcdc44bf7a68463fe89ea"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.767319 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" event={"ID":"720c691e-a28e-4b39-9571-86e321399306","Type":"ContainerStarted","Data":"cf6c11b6dde3468bf6e490f0312c45a66f897ab6703dae5e894d586ce5647f0d"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.767364 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" event={"ID":"720c691e-a28e-4b39-9571-86e321399306","Type":"ContainerStarted","Data":"f4c6ed903c3395f90e7066961181cce4c93c729200b9de466531c3701c6f41b4"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.767789 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.772980 5014 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-v74l6 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.41:6443/healthz\": dial tcp 10.217.0.41:6443: connect: connection refused" start-of-body= Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.773040 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" podUID="720c691e-a28e-4b39-9571-86e321399306" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.41:6443/healthz\": dial tcp 10.217.0.41:6443: connect: connection refused" Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.778796 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" event={"ID":"2f6a5a5c-5106-44e0-9579-ce54200179b1","Type":"ContainerStarted","Data":"e55fc6986e3bd0c44017a151e10efcefdab219105f9a83303836963bf5cbf6b1"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.782340 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx" event={"ID":"ef9faff3-6d7d-40f6-99c6-3771e940c03b","Type":"ContainerStarted","Data":"df1cef4b30dd52b3fa484a8bca258deb3449c510ca6f52eb32dac8d079f72b13"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.782373 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx" event={"ID":"ef9faff3-6d7d-40f6-99c6-3771e940c03b","Type":"ContainerStarted","Data":"465cdacbffcd67ac3aa52ea7ffc38c9c4600d242dd587b9aaa7d738c026c1e71"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.786652 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n59hv" event={"ID":"d1e1ed07-468a-4627-9690-ab83129e9a93","Type":"ContainerStarted","Data":"2dff11e254c7308187fb06978ebd8e95b421ca1027378f3063fcdccaef0ecd4c"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.789046 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-f5cd4" event={"ID":"26e227a5-f26a-4bfb-9d4b-0f6718a234b7","Type":"ContainerStarted","Data":"d302ac3bf641eb83ad36797c26c88a2feebe9717705bb0da4b069873e7259a59"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.807715 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:19 crc kubenswrapper[5014]: E1006 21:33:19.808801 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:20.308784954 +0000 UTC m=+145.601821688 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.809144 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-97wsg" event={"ID":"86145702-4563-405c-bc92-cddd39e5e750","Type":"ContainerStarted","Data":"4da9e68927a56ee0ce0be486531236d911996966e5989e1aba364bba47bfd2f9"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.809175 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-97wsg" event={"ID":"86145702-4563-405c-bc92-cddd39e5e750","Type":"ContainerStarted","Data":"d179e58c6957740d9a72efb9d4de5e96e9dcba09ee7f797fa7b6cfe5acc20126"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.810181 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-97wsg" Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.828479 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl" event={"ID":"01a961cf-0904-406d-862a-027b53178111","Type":"ContainerStarted","Data":"d995ee938fd8400d28d244ba95368c9e13142071ef0711efa1efd72c6de1622b"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.895471 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-wqnvh" event={"ID":"6a8033e0-8d2f-4f40-bab2-b2670cefe0b3","Type":"ContainerStarted","Data":"7cbf4f27001dac75e6b7ca2d80bb292245c4805fd0e61c965f40f14ac0b9fb47"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.912176 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:19 crc kubenswrapper[5014]: E1006 21:33:19.918251 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:20.418229733 +0000 UTC m=+145.711266467 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.945795 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-6kdh6" event={"ID":"8e91dd83-2270-4ddf-bf09-d6d1b8595453","Type":"ContainerStarted","Data":"b791d12341c0a355f3df05a3337ae36094fdb301683b4e6d5c7d726285bc119f"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.952105 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh" event={"ID":"5426ed16-fe90-4dc5-9a28-13d3649074e6","Type":"ContainerStarted","Data":"dd559fbfa7fad250892778418e4cfca9409ff0bbf7f4134575ef0bc03b9e2e2b"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.962826 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49" event={"ID":"c72ed570-ae2c-4d46-92dd-69f41bda14ca","Type":"ContainerStarted","Data":"5d0694eef67e5eb6b0a6a800807c3eddb3f7f6dad3d915403efdda9a0825c533"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.987218 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" event={"ID":"f7160d37-518f-49e6-aee8-ce14c3267c54","Type":"ContainerStarted","Data":"2d3eb5fe194acb72e9deb1478224ffa64285c021485d5e44d9c03fe4cf34b152"} Oct 06 21:33:19 crc kubenswrapper[5014]: I1006 21:33:19.994414 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj" event={"ID":"7a0e9e21-4f5b-4e63-b976-1f16d01f357c","Type":"ContainerStarted","Data":"2914989528a847b91fb39c9c5fbcd975ab6cec392f0e9c74f6cd754438c8cfa7"} Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.003324 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4" event={"ID":"6d352ce1-8de3-49ad-83b7-62f38f1864aa","Type":"ContainerStarted","Data":"fd8e562189ab0784be3a8b7fc2a09b2c03671719d3bac94c98f242ca8a24a575"} Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.033729 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl" podStartSLOduration=124.033708277 podStartE2EDuration="2m4.033708277s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:20.022542999 +0000 UTC m=+145.315579753" watchObservedRunningTime="2025-10-06 21:33:20.033708277 +0000 UTC m=+145.326745011" Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.037131 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:20 crc kubenswrapper[5014]: E1006 21:33:20.037673 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:20.53764921 +0000 UTC m=+145.830685944 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.039803 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2" event={"ID":"9ce11448-b517-411b-8bf1-8494fc43116e","Type":"ContainerStarted","Data":"32602f663f29f404f1d6327564cc09e3abc57321e7c5c675d05315cd25ef8927"} Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.061938 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" podStartSLOduration=124.061881489 podStartE2EDuration="2m4.061881489s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:20.053490325 +0000 UTC m=+145.346527049" watchObservedRunningTime="2025-10-06 21:33:20.061881489 +0000 UTC m=+145.354918223" Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.065883 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-77xrv" event={"ID":"101e93eb-cfad-49df-95ce-b6b12664dd3a","Type":"ContainerStarted","Data":"4ef933ad9c619c7c57b8a1295f30d9ac95fcac2be020e9a4a8663a1130d81b36"} Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.069370 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb" event={"ID":"1e659c79-33c6-49d3-a333-5280ece9fa5b","Type":"ContainerStarted","Data":"ceb6aa21b7f3224f948339989163f54bd7b5362a081c015f33ea738fed0d16f1"} Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.077968 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z" event={"ID":"537a285f-ff1e-4ae8-954d-0ff696453b85","Type":"ContainerStarted","Data":"aa4309984e47f6171ac753d007c81e107d6d9a89f8a98bd44b80ba4965a10644"} Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.078413 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z" Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.079723 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2" event={"ID":"dcdf5ea0-5d82-4417-89fa-6c37aee2916d","Type":"ContainerStarted","Data":"73fc5ab043e6b5ee27509aaba939f51cc5924ae6d7e0b7b631cc249acf40b8c8"} Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.079748 5014 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2" event={"ID":"dcdf5ea0-5d82-4417-89fa-6c37aee2916d","Type":"ContainerStarted","Data":"7afa48e4f1d7b81350ad6de3f6bc1282f5f768e933194137eb1a8ae5d84621c5"} Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.082286 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zjxxb" event={"ID":"181953a7-e119-4c51-b16f-f1ec4b63da1e","Type":"ContainerStarted","Data":"89e867b7131d3a84b0465d25a13043a6126773735e16842f091ad0b82008b68d"} Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.082836 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-zjxxb" Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.085056 5014 generic.go:334] "Generic (PLEG): container finished" podID="fbda3f6d-582e-4290-8939-e06c2f971f0e" containerID="0f7951b0697bfed8319f57d955b0a2ab91983ed41776a45d1cf2d6d896dc9071" exitCode=0 Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.085434 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq" event={"ID":"fbda3f6d-582e-4290-8939-e06c2f971f0e","Type":"ContainerDied","Data":"0f7951b0697bfed8319f57d955b0a2ab91983ed41776a45d1cf2d6d896dc9071"} Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.100726 5014 patch_prober.go:28] interesting pod/console-operator-58897d9998-zjxxb container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.36:8443/readyz\": dial tcp 10.217.0.36:8443: connect: connection refused" start-of-body= Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.100778 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zjxxb" podUID="181953a7-e119-4c51-b16f-f1ec4b63da1e" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.36:8443/readyz\": dial tcp 10.217.0.36:8443: connect: connection refused" Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.102271 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qh6cx" podStartSLOduration=124.102262054 podStartE2EDuration="2m4.102262054s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:20.101447047 +0000 UTC m=+145.394483781" watchObservedRunningTime="2025-10-06 21:33:20.102262054 +0000 UTC m=+145.395298788" Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.131417 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z" Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.139381 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:20 crc kubenswrapper[5014]: E1006 21:33:20.140743 5014 nestedpendingoperations.go:348] 
Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:20.640725164 +0000 UTC m=+145.933761888 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.157525 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" podStartSLOduration=124.157508171 podStartE2EDuration="2m4.157508171s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:20.156659313 +0000 UTC m=+145.449696037" watchObservedRunningTime="2025-10-06 21:33:20.157508171 +0000 UTC m=+145.450544905" Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.166958 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756" event={"ID":"a6ff5a2f-dbc4-4445-b297-e8627bfb2d04","Type":"ContainerStarted","Data":"dbb7a0f4bd05e4d9a4c635bee142bdc12b121858a4a353a0366c08e04d620d6e"} Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.168670 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5" event={"ID":"6d44c7b8-2cb6-4445-afe1-de6732d5c626","Type":"ContainerStarted","Data":"8e7ff107c57f0ca07186e8049e1a5ef895202a18247ab038c05abee9267b039b"} Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.175171 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv" event={"ID":"65d0615f-167e-462b-b846-cee104cdeec4","Type":"ContainerStarted","Data":"bc809c68ef8628baa342d77760e597f6b83aafcbe591a1f24592fe87633956ca"} Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.190768 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-dq58g" podStartSLOduration=124.190748054 podStartE2EDuration="2m4.190748054s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:20.19031266 +0000 UTC m=+145.483349394" watchObservedRunningTime="2025-10-06 21:33:20.190748054 +0000 UTC m=+145.483784788" Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.220551 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-97wsg" podStartSLOduration=6.220535312 podStartE2EDuration="6.220535312s" podCreationTimestamp="2025-10-06 21:33:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:20.217876722 +0000 UTC m=+145.510913456" watchObservedRunningTime="2025-10-06 21:33:20.220535312 +0000 UTC m=+145.513572046" Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.233769 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-pxwc5" event={"ID":"be279f3d-aa9c-47fc-8396-8ffb895fc9b6","Type":"ContainerStarted","Data":"0f7110d39bcf10528ee90a831c1651e7e608b287ea60b192027cc10ca5b0a104"}
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.243119 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 06 21:33:20 crc kubenswrapper[5014]: E1006 21:33:20.244728 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:20.744710579 +0000 UTC m=+146.037747313 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.269484 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq" event={"ID":"a899a8b5-e5c4-456e-a469-203bbcf5445e","Type":"ContainerStarted","Data":"729853d1d6875f377e0aa548b794cfeee34aaf8d24546cf1bf3e0ed565357c02"}
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.290399 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-zvv9h" podStartSLOduration=124.290382262 podStartE2EDuration="2m4.290382262s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:20.273084918 +0000 UTC m=+145.566121652" watchObservedRunningTime="2025-10-06 21:33:20.290382262 +0000 UTC m=+145.583418996"
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.344728 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.346092 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 06 21:33:20 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld
Oct 06 21:33:20 crc kubenswrapper[5014]: [+]process-running ok
Oct 06 21:33:20 crc kubenswrapper[5014]: healthz check failed
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.346158 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 06 21:33:20 crc kubenswrapper[5014]: E1006 21:33:20.353743 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:20.853725214 +0000 UTC m=+146.146761948 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.451576 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 06 21:33:20 crc kubenswrapper[5014]: E1006 21:33:20.452553 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:20.952526284 +0000 UTC m=+146.245563018 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.466568 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-zjxxb" podStartSLOduration=125.466550818 podStartE2EDuration="2m5.466550818s" podCreationTimestamp="2025-10-06 21:31:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:20.464270761 +0000 UTC m=+145.757307495" watchObservedRunningTime="2025-10-06 21:33:20.466550818 +0000 UTC m=+145.759587552"
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.557540 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:20 crc kubenswrapper[5014]: E1006 21:33:20.558083 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:21.058067171 +0000 UTC m=+146.351103905 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
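
Each failed mount or unmount above is parked by nestedpendingoperations with a "No retries permitted until" deadline 500ms out (durationBeforeRetry), so the reconciler keeps retrying without hot-looping. A rough sketch of that gating pattern, illustrative only and not the kubelet's actual implementation (which also grows the backoff on repeated failures of the same operation):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // pendingOp remembers when a failed operation may be retried.
    type pendingOp struct {
        retryAfter time.Time
        lastErr    error
    }

    // try runs op unless we are still inside the backoff window.
    func (p *pendingOp) try(op func() error, durationBeforeRetry time.Duration) error {
        if time.Now().Before(p.retryAfter) {
            return fmt.Errorf("no retries permitted until %s: %w",
                p.retryAfter.Format(time.RFC3339Nano), p.lastErr)
        }
        if err := op(); err != nil {
            p.lastErr = err
            p.retryAfter = time.Now().Add(durationBeforeRetry) // 500ms in this log
            return err
        }
        return nil
    }

    func main() {
        // Stand-in for MountDevice while the CSI driver is still unregistered.
        mount := func() error { return errors.New("driver not yet registered") }
        var p pendingOp
        for i := 0; i < 3; i++ {
            fmt.Println(p.try(mount, 500*time.Millisecond))
            time.Sleep(600 * time.Millisecond) // wait out the backoff window before retrying
        }
    }
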
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.590343 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8pkfv" podStartSLOduration=124.590324301 podStartE2EDuration="2m4.590324301s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:20.542903888 +0000 UTC m=+145.835940622" watchObservedRunningTime="2025-10-06 21:33:20.590324301 +0000 UTC m=+145.883361035"
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.640560 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-zjlcj" podStartSLOduration=124.640539269 podStartE2EDuration="2m4.640539269s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:20.638311014 +0000 UTC m=+145.931347748" watchObservedRunningTime="2025-10-06 21:33:20.640539269 +0000 UTC m=+145.933576003"
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.640937 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gd58z" podStartSLOduration=124.640930973 podStartE2EDuration="2m4.640930973s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:20.592540637 +0000 UTC m=+145.885577371" watchObservedRunningTime="2025-10-06 21:33:20.640930973 +0000 UTC m=+145.933967707"
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.659047 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 06 21:33:20 crc kubenswrapper[5014]: E1006 21:33:20.659330 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:21.159313683 +0000 UTC m=+146.452350417 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.691816 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-t2756" podStartSLOduration=125.691795021 podStartE2EDuration="2m5.691795021s" podCreationTimestamp="2025-10-06 21:31:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:20.691019385 +0000 UTC m=+145.984056119" watchObservedRunningTime="2025-10-06 21:33:20.691795021 +0000 UTC m=+145.984831755"
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.760603 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:20 crc kubenswrapper[5014]: E1006 21:33:20.761316 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:21.261300531 +0000 UTC m=+146.554337275 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.861686 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 06 21:33:20 crc kubenswrapper[5014]: E1006 21:33:20.862051 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:21.362034566 +0000 UTC m=+146.655071300 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:20 crc kubenswrapper[5014]: I1006 21:33:20.965126 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:20 crc kubenswrapper[5014]: E1006 21:33:20.966041 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:21.466023521 +0000 UTC m=+146.759060255 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.066612 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 06 21:33:21 crc kubenswrapper[5014]: E1006 21:33:21.067223 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:21.567186811 +0000 UTC m=+146.860223545 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.067583 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:21 crc kubenswrapper[5014]: E1006 21:33:21.068115 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:21.568098762 +0000 UTC m=+146.861135496 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.168756 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 06 21:33:21 crc kubenswrapper[5014]: E1006 21:33:21.169162 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:21.669142287 +0000 UTC m=+146.962179021 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.269902 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:21 crc kubenswrapper[5014]: E1006 21:33:21.270386 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:21.770373819 +0000 UTC m=+147.063410553 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.296993 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zjxxb" event={"ID":"181953a7-e119-4c51-b16f-f1ec4b63da1e","Type":"ContainerStarted","Data":"0c330eeb99ca876a17ee731b32b3fdc49ca8473fd8b559c7b68ad3e201d55df7"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.298661 5014 patch_prober.go:28] interesting pod/console-operator-58897d9998-zjxxb container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.36:8443/readyz\": dial tcp 10.217.0.36:8443: connect: connection refused" start-of-body=
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.298718 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zjxxb" podUID="181953a7-e119-4c51-b16f-f1ec4b63da1e" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.36:8443/readyz\": dial tcp 10.217.0.36:8443: connect: connection refused"
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.316516 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n59hv" event={"ID":"d1e1ed07-468a-4627-9690-ab83129e9a93","Type":"ContainerStarted","Data":"da84c633d124ceb506205fbaa13caa66018fd644b992a92c70522cdf450ce35e"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.316567 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n59hv" event={"ID":"d1e1ed07-468a-4627-9690-ab83129e9a93","Type":"ContainerStarted","Data":"4f4779760248497fa893b172c413501a9ddfe0cc147e8651e0d8951bfe9a891d"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.321662 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-pxwc5" event={"ID":"be279f3d-aa9c-47fc-8396-8ffb895fc9b6","Type":"ContainerStarted","Data":"e31f64d8d7ae8c3c3ccaeb40619c181a49ae31258fa8ae2642bcb6c388655444"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.321718 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-pxwc5" event={"ID":"be279f3d-aa9c-47fc-8396-8ffb895fc9b6","Type":"ContainerStarted","Data":"1158e4e1f3521a6799ceb7636bb967e3fe93a630c68f2f69e4e5153d4e6610d9"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.324084 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh" event={"ID":"5426ed16-fe90-4dc5-9a28-13d3649074e6","Type":"ContainerStarted","Data":"ae898c331c9e1141cb214815d79a5f939569c68cdc0ac85845e829ff8179900e"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.326140 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq" event={"ID":"a899a8b5-e5c4-456e-a469-203bbcf5445e","Type":"ContainerStarted","Data":"52cd0cf4c285653b7dfc898aed53318f538de94012c3f9be60ca630e4a5f67f0"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.339000 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" event={"ID":"2f6a5a5c-5106-44e0-9579-ce54200179b1","Type":"ContainerStarted","Data":"e170376433d96ac48216624f8d214c69c92ead8d0347f9102473ea5104893111"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.340116 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4"
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.343882 5014 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-99ns4 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.23:5443/healthz\": dial tcp 10.217.0.23:5443: connect: connection refused" start-of-body=
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.343962 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" podUID="2f6a5a5c-5106-44e0-9579-ce54200179b1" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.23:5443/healthz\": dial tcp 10.217.0.23:5443: connect: connection refused"
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.346887 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 06 21:33:21 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld
Oct 06 21:33:21 crc kubenswrapper[5014]: [+]process-running ok
Oct 06 21:33:21 crc kubenswrapper[5014]: healthz check failed
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.346943 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.358529 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-w8lt2" podStartSLOduration=125.358513208 podStartE2EDuration="2m5.358513208s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:20.74084557 +0000 UTC m=+146.033882314" watchObservedRunningTime="2025-10-06 21:33:21.358513208 +0000 UTC m=+146.651549942"
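
The probe records above show the two failure shapes the kubelet logs while pods are still starting: a transport error ("connect: connection refused" because the server socket is not listening yet) and an HTTP 500 from the router's healthz endpoint, whose body lists each sub-check ([-] failing, [+] ok). A simplified readiness check in the same spirit; the kubelet's HTTP probes treat any status from 200 up to but not including 400 as success, and do not verify serving certificates, which the sketch mirrors:

    package main

    import (
        "crypto/tls"
        "fmt"
        "net/http"
        "time"
    )

    // probe issues one GET and applies the kubelet's success rule: 200 <= code < 400.
    func probe(url string) error {
        client := &http.Client{
            Timeout: 1 * time.Second,
            Transport: &http.Transport{
                // Probes skip certificate verification, as the kubelet does.
                TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
            },
        }
        resp, err := client.Get(url)
        if err != nil {
            return err // e.g. "dial tcp ...: connect: connection refused"
        }
        defer resp.Body.Close()
        if resp.StatusCode >= 200 && resp.StatusCode < 400 {
            return nil
        }
        return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
    }

    func main() {
        // Endpoint taken from the log above; it only resolves from inside the cluster.
        fmt.Println(probe("https://10.217.0.36:8443/readyz"))
    }
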
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.359754 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-6kdh6" event={"ID":"8e91dd83-2270-4ddf-bf09-d6d1b8595453","Type":"ContainerStarted","Data":"2c6309163dd5406b44b2b1e0f92204aa2f8f2952606dc7089b3e4bc920bc9960"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.368922 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2" event={"ID":"9ce11448-b517-411b-8bf1-8494fc43116e","Type":"ContainerStarted","Data":"59d2988eff33cc8519ba7faaf5574166b31219d6bdacecd2898a65e2ad0cba1f"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.370808 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 06 21:33:21 crc kubenswrapper[5014]: E1006 21:33:21.372087 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:21.872071646 +0000 UTC m=+147.165108380 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.388491 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-f5cd4" event={"ID":"26e227a5-f26a-4bfb-9d4b-0f6718a234b7","Type":"ContainerStarted","Data":"7c6bb87b09dbc37ed103b1723b1230c9d204b5bffec54cc7a3b2259dc7c4b12a"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.401476 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-pxwc5" podStartSLOduration=126.40145375 podStartE2EDuration="2m6.40145375s" podCreationTimestamp="2025-10-06 21:31:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:21.399215714 +0000 UTC m=+146.692252448" watchObservedRunningTime="2025-10-06 21:33:21.40145375 +0000 UTC m=+146.694490484"
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.408135 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-n59hv" podStartSLOduration=125.408108065 podStartE2EDuration="2m5.408108065s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:21.362567795 +0000 UTC m=+146.655604529" watchObservedRunningTime="2025-10-06 21:33:21.408108065 +0000 UTC m=+146.701144799"
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.420811 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5" event={"ID":"6d44c7b8-2cb6-4445-afe1-de6732d5c626","Type":"ContainerStarted","Data":"5789560835c387840743c96dbc90f3dbe92846dfb9b5916620bbb3ec214ae893"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.429800 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8cghq" podStartSLOduration=125.429774157 podStartE2EDuration="2m5.429774157s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:21.423708732 +0000 UTC m=+146.716745466" watchObservedRunningTime="2025-10-06 21:33:21.429774157 +0000 UTC m=+146.722810891"
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.432302 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-wqnvh" event={"ID":"6a8033e0-8d2f-4f40-bab2-b2670cefe0b3","Type":"ContainerStarted","Data":"b5b88602dab94935217fe206e28e01ddcbb9e5d83084b52629c9e15b729e56d9"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.445050 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq" event={"ID":"fbda3f6d-582e-4290-8939-e06c2f971f0e","Type":"ContainerStarted","Data":"249cb643e6b7f838243b1a72cc69ffb790c515abd73b32e8892646883e9bcf63"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.445887 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq"
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.465262 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-77xrv" event={"ID":"101e93eb-cfad-49df-95ce-b6b12664dd3a","Type":"ContainerStarted","Data":"05b4ea3e11a4b22f2d7791e5529657e5bf619bc3ae4abc537cdc2086eb530d1c"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.472432 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:21 crc kubenswrapper[5014]: E1006 21:33:21.473602 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:21.973570667 +0000 UTC m=+147.266607401 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.482395 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl" event={"ID":"01a961cf-0904-406d-862a-027b53178111","Type":"ContainerStarted","Data":"5ae1d29d1eab83020ec676461dd5e0d22ee721b6b886da3c475b6df2890d0266"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.482876 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl"
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.485308 5014 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-8mlsl container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:8443/healthz\": dial tcp 10.217.0.30:8443: connect: connection refused" start-of-body=
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.485351 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl" podUID="01a961cf-0904-406d-862a-027b53178111" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.30:8443/healthz\": dial tcp 10.217.0.30:8443: connect: connection refused"
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.493994 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-krsnh" podStartSLOduration=125.493976677 podStartE2EDuration="2m5.493976677s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:21.491550055 +0000 UTC m=+146.784586809" watchObservedRunningTime="2025-10-06 21:33:21.493976677 +0000 UTC m=+146.787013411"
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.495002 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" podStartSLOduration=125.494997731 podStartE2EDuration="2m5.494997731s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:21.462724651 +0000 UTC m=+146.755761385" watchObservedRunningTime="2025-10-06 21:33:21.494997731 +0000 UTC m=+146.788034455"
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.509836 5014 patch_prober.go:28] interesting pod/downloads-7954f5f757-zh4s7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body=
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.509907 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zh4s7" podUID="2bb77525-303a-4691-81d2-0bbeb6eeed9c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused"
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.535311 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-zh4s7"
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.535346 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-zh4s7" event={"ID":"2bb77525-303a-4691-81d2-0bbeb6eeed9c","Type":"ContainerStarted","Data":"431797e2da86c0ffef18d2652f72ee7d6c95d75c3d46215b7177408053d129c3"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.563029 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4" event={"ID":"6d352ce1-8de3-49ad-83b7-62f38f1864aa","Type":"ContainerStarted","Data":"00ff61c961c087d8523a936ee93b1c3adab014267ff2ec15eda9055a6f1256d4"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.570220 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49" event={"ID":"c72ed570-ae2c-4d46-92dd-69f41bda14ca","Type":"ContainerStarted","Data":"992044f64be8f4823462bccb7cb0804700b9d94a5473e7428b6e993ca81f901d"}
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.577641 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-f5cd4" podStartSLOduration=7.577607524 podStartE2EDuration="7.577607524s" podCreationTimestamp="2025-10-06 21:33:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:21.535614784 +0000 UTC m=+146.828651508" watchObservedRunningTime="2025-10-06 21:33:21.577607524 +0000 UTC m=+146.870644258"
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.578049 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp"
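
The pod_startup_latency_tracker records above are simple arithmetic over the timestamps they print: in these entries no image pull happened (both pulling timestamps are the zero time), so podStartSLOduration equals watchObservedRunningTime minus podCreationTimestamp. Re-deriving the packageserver figure from the log:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Layout matching the timestamps as printed in the log.
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
        created, _ := time.Parse(layout, "2025-10-06 21:31:16 +0000 UTC")
        observed, _ := time.Parse(layout, "2025-10-06 21:33:21.494997731 +0000 UTC")
        // Prints 2m5.494997731s, matching podStartSLOduration=125.494997731
        // and podStartE2EDuration="2m5.494997731s" above.
        fmt.Println(observed.Sub(created))
    }
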
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.578308 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.578370 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:21 crc kubenswrapper[5014]: E1006 21:33:21.579173 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.079154157 +0000 UTC m=+147.372190891 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.580898 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:21 crc kubenswrapper[5014]: E1006 21:33:21.583176 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.083157711 +0000 UTC m=+147.376194645 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.599339 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" event={"ID":"f7160d37-518f-49e6-aee8-ce14c3267c54","Type":"ContainerStarted","Data":"0ac705eb80f13a20a1d1973f488b0f02d29f22018c851cb10939ac7e3c7bae4c"} Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.599401 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" event={"ID":"f7160d37-518f-49e6-aee8-ce14c3267c54","Type":"ContainerStarted","Data":"a53bf03b831fd43947b912a8a08a4270de27e74bd48443a0fe3eff2310d5e1fa"} Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.608749 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq" podStartSLOduration=126.608728756 podStartE2EDuration="2m6.608728756s" podCreationTimestamp="2025-10-06 21:31:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:21.577921575 +0000 UTC m=+146.870958309" watchObservedRunningTime="2025-10-06 21:33:21.608728756 +0000 UTC m=+146.901765490" Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.623208 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb" event={"ID":"1e659c79-33c6-49d3-a333-5280ece9fa5b","Type":"ContainerStarted","Data":"9a7321f375cc06c6f04b42cc9c645e450c3b940eb3b374323d4bb7feb8985a37"} Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.673766 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tjwg2" podStartSLOduration=125.673735054 podStartE2EDuration="2m5.673735054s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:21.66210672 +0000 UTC m=+146.955143454" watchObservedRunningTime="2025-10-06 21:33:21.673735054 +0000 UTC m=+146.966771788" Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.674073 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-77xrv" podStartSLOduration=125.674067975 podStartE2EDuration="2m5.674067975s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:21.612269566 +0000 UTC m=+146.905306300" watchObservedRunningTime="2025-10-06 21:33:21.674067975 +0000 UTC m=+146.967104709" Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.682248 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:21 crc kubenswrapper[5014]: E1006 21:33:21.684123 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.184105654 +0000 UTC m=+147.477142388 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.704744 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-q4xf4" podStartSLOduration=125.704721211 podStartE2EDuration="2m5.704721211s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:21.700420926 +0000 UTC m=+146.993457660" watchObservedRunningTime="2025-10-06 21:33:21.704721211 +0000 UTC m=+146.997757945" Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.738203 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.738287 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.768282 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8mx49" podStartSLOduration=125.768257578 podStartE2EDuration="2m5.768257578s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:21.758746437 +0000 UTC m=+147.051783171" watchObservedRunningTime="2025-10-06 21:33:21.768257578 +0000 UTC m=+147.061294312" Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.785975 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:21 crc kubenswrapper[5014]: E1006 21:33:21.786400 5014 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.28637526 +0000 UTC m=+147.579411994 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.807802 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-zh4s7" podStartSLOduration=125.807786945 podStartE2EDuration="2m5.807786945s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:21.807172664 +0000 UTC m=+147.100209398" watchObservedRunningTime="2025-10-06 21:33:21.807786945 +0000 UTC m=+147.100823679" Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.838266 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-ffz9s" podStartSLOduration=125.838248564 podStartE2EDuration="2m5.838248564s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:21.837107156 +0000 UTC m=+147.130143890" watchObservedRunningTime="2025-10-06 21:33:21.838248564 +0000 UTC m=+147.131285298" Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.889070 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:21 crc kubenswrapper[5014]: E1006 21:33:21.889694 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.389672762 +0000 UTC m=+147.682709496 (durationBeforeRetry 500ms). 
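
The m=+147... suffixes on these timestamps are Go's monotonic clock reading: a time.Time captured with time.Now() carries seconds elapsed since a process-local start point (here the offsets count up from roughly kubelet start), and Go's default time formatter appends it as m=±offset. A small demonstration:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        t := time.Now()
        fmt.Println(t) // e.g. "2025-10-06 21:33:21 +0000 UTC m=+0.000012345"
        time.Sleep(250 * time.Millisecond)
        fmt.Println(time.Now()) // the m=+ offset advances with the monotonic clock
        fmt.Println(t.Round(0)) // Round(0) strips the monotonic reading, so no m=+ suffix
    }
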
Oct 06 21:33:21 crc kubenswrapper[5014]: I1006 21:33:21.991667 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:21 crc kubenswrapper[5014]: E1006 21:33:21.992691 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.492668184 +0000 UTC m=+147.785704928 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.093388 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 06 21:33:22 crc kubenswrapper[5014]: E1006 21:33:22.093646 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.593596416 +0000 UTC m=+147.886633150 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.093728 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:22 crc kubenswrapper[5014]: E1006 21:33:22.094292 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.594255598 +0000 UTC m=+147.887292332 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.194880 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 06 21:33:22 crc kubenswrapper[5014]: E1006 21:33:22.195190 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.695140468 +0000 UTC m=+147.988177202 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.195595 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:22 crc kubenswrapper[5014]: E1006 21:33:22.195920 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.695905894 +0000 UTC m=+147.988942628 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.296683 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-v74l6"
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.297252 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 06 21:33:22 crc kubenswrapper[5014]: E1006 21:33:22.297813 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.797784147 +0000 UTC m=+148.090820881 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.342974 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 06 21:33:22 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld
Oct 06 21:33:22 crc kubenswrapper[5014]: [+]process-running ok
Oct 06 21:33:22 crc kubenswrapper[5014]: healthz check failed
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.343038 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.374907 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb" podStartSLOduration=127.374890824 podStartE2EDuration="2m7.374890824s" podCreationTimestamp="2025-10-06 21:31:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:21.885352637 +0000 UTC m=+147.178389371" watchObservedRunningTime="2025-10-06 21:33:22.374890824 +0000 UTC m=+147.667927558"
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.399747 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:22 crc kubenswrapper[5014]: E1006 21:33:22.400195 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:22.900176679 +0000 UTC m=+148.193213413 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.501139 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 06 21:33:22 crc kubenswrapper[5014]: E1006 21:33:22.502150 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:23.002121985 +0000 UTC m=+148.295158719 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.603314 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.603361 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.603388 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.603431 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.603461 5014 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:33:22 crc kubenswrapper[5014]: E1006 21:33:22.604898 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:23.104881448 +0000 UTC m=+148.397918182 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.608815 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.610183 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.612166 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.621262 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.635994 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5" event={"ID":"6d44c7b8-2cb6-4445-afe1-de6732d5c626","Type":"ContainerStarted","Data":"72b0c893a5f1b61b0e3d777e87d4f3852689c75d77437618b40197f1361bb6d7"} Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.640977 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" event={"ID":"a8703611-a93c-4633-8264-4c3ce3eaf77e","Type":"ContainerStarted","Data":"960b3133826b9fb19f43cd21b58f0f1d7f12a5ad93fa1596a02612b669bacdd8"} Oct 06 
21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.642565 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-wqnvh" event={"ID":"6a8033e0-8d2f-4f40-bab2-b2670cefe0b3","Type":"ContainerStarted","Data":"c394b2b369ba6093d1792ab1c0f6118b20ff8cdcb4b60729d56cc619ea4f6261"} Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.647407 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-k27df" event={"ID":"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036","Type":"ContainerStarted","Data":"357e28aaf1fd1e909eb0048d856de752bccf243bc3903f0aeea09eca63b6c1da"} Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.649949 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-6kdh6" event={"ID":"8e91dd83-2270-4ddf-bf09-d6d1b8595453","Type":"ContainerStarted","Data":"3b1d4120154ed4eb3fc911e148736f0e8af239f51042ea37e53caff305906562"} Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.652972 5014 patch_prober.go:28] interesting pod/downloads-7954f5f757-zh4s7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.653017 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zh4s7" podUID="2bb77525-303a-4691-81d2-0bbeb6eeed9c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.658866 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.659999 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8mlsl" Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.707067 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:22 crc kubenswrapper[5014]: E1006 21:33:22.707433 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:23.207405674 +0000 UTC m=+148.500442408 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.707796 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.709292 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 06 21:33:22 crc kubenswrapper[5014]: E1006 21:33:22.711731 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:23.211701429 +0000 UTC m=+148.504738153 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.742919 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.756834 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.760000 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9vkf5" podStartSLOduration=126.759979131 podStartE2EDuration="2m6.759979131s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:22.686349462 +0000 UTC m=+147.979386186" watchObservedRunningTime="2025-10-06 21:33:22.759979131 +0000 UTC m=+148.053015875" Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.816276 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:22 crc kubenswrapper[5014]: E1006 21:33:22.816605 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:23.316590215 +0000 UTC m=+148.609626949 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.823051 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-6kdh6" podStartSLOduration=126.823034282 podStartE2EDuration="2m6.823034282s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:22.821043235 +0000 UTC m=+148.114079969" watchObservedRunningTime="2025-10-06 21:33:22.823034282 +0000 UTC m=+148.116071016" Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.917161 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" podStartSLOduration=126.917143613 podStartE2EDuration="2m6.917143613s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:22.915254 +0000 UTC m=+148.208290744" watchObservedRunningTime="2025-10-06 21:33:22.917143613 +0000 UTC m=+148.210180347" Oct 06 21:33:22 crc kubenswrapper[5014]: I1006 21:33:22.918239 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:22 crc kubenswrapper[5014]: E1006 21:33:22.918610 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:23.418594853 +0000 UTC m=+148.711631597 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.018972 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:23 crc kubenswrapper[5014]: E1006 21:33:23.019197 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:23.519171192 +0000 UTC m=+148.812207926 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.019265 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:23 crc kubenswrapper[5014]: E1006 21:33:23.019676 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:23.519669099 +0000 UTC m=+148.812705833 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.120074 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:23 crc kubenswrapper[5014]: E1006 21:33:23.120396 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:23.620379003 +0000 UTC m=+148.913415737 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.221547 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:23 crc kubenswrapper[5014]: E1006 21:33:23.222211 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:23.722193305 +0000 UTC m=+149.015230039 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.291600 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-wqnvh" podStartSLOduration=127.291562879 podStartE2EDuration="2m7.291562879s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:22.969080649 +0000 UTC m=+148.262117383" watchObservedRunningTime="2025-10-06 21:33:23.291562879 +0000 UTC m=+148.584599613" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.314561 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-zjxxb" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.325006 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:23 crc kubenswrapper[5014]: E1006 21:33:23.325378 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:23.825357132 +0000 UTC m=+149.118393866 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.347302 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:23 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:23 crc kubenswrapper[5014]: [+]process-running ok Oct 06 21:33:23 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.347357 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.426340 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:23 crc kubenswrapper[5014]: E1006 21:33:23.426649 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:23.926636586 +0000 UTC m=+149.219673320 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.528290 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:23 crc kubenswrapper[5014]: E1006 21:33:23.528454 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:24.028434886 +0000 UTC m=+149.321471620 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.528843 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:23 crc kubenswrapper[5014]: E1006 21:33:23.529202 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:24.029192883 +0000 UTC m=+149.322229617 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.580663 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kfzpv"] Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.581742 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kfzpv" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.588833 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.630414 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.630675 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lllsh\" (UniqueName: \"kubernetes.io/projected/942343e5-31c5-44bf-accb-42c83a176d0c-kube-api-access-lllsh\") pod \"certified-operators-kfzpv\" (UID: \"942343e5-31c5-44bf-accb-42c83a176d0c\") " pod="openshift-marketplace/certified-operators-kfzpv" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.630733 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/942343e5-31c5-44bf-accb-42c83a176d0c-catalog-content\") pod \"certified-operators-kfzpv\" (UID: \"942343e5-31c5-44bf-accb-42c83a176d0c\") " pod="openshift-marketplace/certified-operators-kfzpv" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.630777 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/942343e5-31c5-44bf-accb-42c83a176d0c-utilities\") pod \"certified-operators-kfzpv\" (UID: \"942343e5-31c5-44bf-accb-42c83a176d0c\") " pod="openshift-marketplace/certified-operators-kfzpv" Oct 06 21:33:23 crc kubenswrapper[5014]: E1006 21:33:23.630879 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:24.130863719 +0000 UTC m=+149.423900453 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.652545 5014 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-99ns4 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.23:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.652687 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" podUID="2f6a5a5c-5106-44e0-9579-ce54200179b1" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.23:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.655396 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"ba6975e92e6b704aa616d042996abb9dacc1b06f377bb55b53787fe6ba548374"} Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.655472 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"b87b273680b642762239be72ee8810766a1a12863fb3559242091b3147f660d3"} Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.677428 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kfzpv"] Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.682010 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-dk7qp" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.685751 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-99ns4" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.733598 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lllsh\" (UniqueName: \"kubernetes.io/projected/942343e5-31c5-44bf-accb-42c83a176d0c-kube-api-access-lllsh\") pod \"certified-operators-kfzpv\" (UID: \"942343e5-31c5-44bf-accb-42c83a176d0c\") " pod="openshift-marketplace/certified-operators-kfzpv" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.733752 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.733833 5014 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/942343e5-31c5-44bf-accb-42c83a176d0c-catalog-content\") pod \"certified-operators-kfzpv\" (UID: \"942343e5-31c5-44bf-accb-42c83a176d0c\") " pod="openshift-marketplace/certified-operators-kfzpv" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.733995 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/942343e5-31c5-44bf-accb-42c83a176d0c-utilities\") pod \"certified-operators-kfzpv\" (UID: \"942343e5-31c5-44bf-accb-42c83a176d0c\") " pod="openshift-marketplace/certified-operators-kfzpv" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.734824 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/942343e5-31c5-44bf-accb-42c83a176d0c-utilities\") pod \"certified-operators-kfzpv\" (UID: \"942343e5-31c5-44bf-accb-42c83a176d0c\") " pod="openshift-marketplace/certified-operators-kfzpv" Oct 06 21:33:23 crc kubenswrapper[5014]: E1006 21:33:23.735926 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:24.23590487 +0000 UTC m=+149.528941604 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.736440 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/942343e5-31c5-44bf-accb-42c83a176d0c-catalog-content\") pod \"certified-operators-kfzpv\" (UID: \"942343e5-31c5-44bf-accb-42c83a176d0c\") " pod="openshift-marketplace/certified-operators-kfzpv" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.790940 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lllsh\" (UniqueName: \"kubernetes.io/projected/942343e5-31c5-44bf-accb-42c83a176d0c-kube-api-access-lllsh\") pod \"certified-operators-kfzpv\" (UID: \"942343e5-31c5-44bf-accb-42c83a176d0c\") " pod="openshift-marketplace/certified-operators-kfzpv" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.835410 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:23 crc kubenswrapper[5014]: E1006 21:33:23.835800 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:24.335784276 +0000 UTC m=+149.628821010 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.898099 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kfzpv" Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.937427 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:23 crc kubenswrapper[5014]: E1006 21:33:23.937866 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:24.437851366 +0000 UTC m=+149.730888100 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.990715 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ln2v9"] Oct 06 21:33:23 crc kubenswrapper[5014]: I1006 21:33:23.991663 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ln2v9" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.010775 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ln2v9"] Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.038136 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.038407 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-utilities\") pod \"certified-operators-ln2v9\" (UID: \"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef\") " pod="openshift-marketplace/certified-operators-ln2v9" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.038434 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-246cw\" (UniqueName: \"kubernetes.io/projected/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-kube-api-access-246cw\") pod \"certified-operators-ln2v9\" (UID: \"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef\") " pod="openshift-marketplace/certified-operators-ln2v9" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.038486 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-catalog-content\") pod \"certified-operators-ln2v9\" (UID: \"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef\") " pod="openshift-marketplace/certified-operators-ln2v9" Oct 06 21:33:24 crc kubenswrapper[5014]: E1006 21:33:24.038572 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:24.53855754 +0000 UTC m=+149.831594274 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.140165 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.140678 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-utilities\") pod \"certified-operators-ln2v9\" (UID: \"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef\") " pod="openshift-marketplace/certified-operators-ln2v9" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.140705 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-246cw\" (UniqueName: \"kubernetes.io/projected/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-kube-api-access-246cw\") pod \"certified-operators-ln2v9\" (UID: \"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef\") " pod="openshift-marketplace/certified-operators-ln2v9" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.140781 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-catalog-content\") pod \"certified-operators-ln2v9\" (UID: \"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef\") " pod="openshift-marketplace/certified-operators-ln2v9" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.141327 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-catalog-content\") pod \"certified-operators-ln2v9\" (UID: \"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef\") " pod="openshift-marketplace/certified-operators-ln2v9" Oct 06 21:33:24 crc kubenswrapper[5014]: E1006 21:33:24.141706 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:24.641686326 +0000 UTC m=+149.934723070 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.142172 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-utilities\") pod \"certified-operators-ln2v9\" (UID: \"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef\") " pod="openshift-marketplace/certified-operators-ln2v9" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.174576 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-246cw\" (UniqueName: \"kubernetes.io/projected/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-kube-api-access-246cw\") pod \"certified-operators-ln2v9\" (UID: \"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef\") " pod="openshift-marketplace/certified-operators-ln2v9" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.177555 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ftjbk"] Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.178720 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ftjbk" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.183124 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.204343 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ftjbk"] Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.242734 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.243168 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c68ce8d8-f494-4971-8068-4fddf55fae97-utilities\") pod \"community-operators-ftjbk\" (UID: \"c68ce8d8-f494-4971-8068-4fddf55fae97\") " pod="openshift-marketplace/community-operators-ftjbk" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.243250 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c68ce8d8-f494-4971-8068-4fddf55fae97-catalog-content\") pod \"community-operators-ftjbk\" (UID: \"c68ce8d8-f494-4971-8068-4fddf55fae97\") " pod="openshift-marketplace/community-operators-ftjbk" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.243275 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhjfp\" (UniqueName: \"kubernetes.io/projected/c68ce8d8-f494-4971-8068-4fddf55fae97-kube-api-access-nhjfp\") pod \"community-operators-ftjbk\" (UID: \"c68ce8d8-f494-4971-8068-4fddf55fae97\") " 
pod="openshift-marketplace/community-operators-ftjbk" Oct 06 21:33:24 crc kubenswrapper[5014]: E1006 21:33:24.243417 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:24.743392314 +0000 UTC m=+150.036429048 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.305517 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ln2v9" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.338459 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kfzpv"] Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.344789 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c68ce8d8-f494-4971-8068-4fddf55fae97-utilities\") pod \"community-operators-ftjbk\" (UID: \"c68ce8d8-f494-4971-8068-4fddf55fae97\") " pod="openshift-marketplace/community-operators-ftjbk" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.345764 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c68ce8d8-f494-4971-8068-4fddf55fae97-utilities\") pod \"community-operators-ftjbk\" (UID: \"c68ce8d8-f494-4971-8068-4fddf55fae97\") " pod="openshift-marketplace/community-operators-ftjbk" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.345910 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.346061 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c68ce8d8-f494-4971-8068-4fddf55fae97-catalog-content\") pod \"community-operators-ftjbk\" (UID: \"c68ce8d8-f494-4971-8068-4fddf55fae97\") " pod="openshift-marketplace/community-operators-ftjbk" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.346110 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhjfp\" (UniqueName: \"kubernetes.io/projected/c68ce8d8-f494-4971-8068-4fddf55fae97-kube-api-access-nhjfp\") pod \"community-operators-ftjbk\" (UID: \"c68ce8d8-f494-4971-8068-4fddf55fae97\") " pod="openshift-marketplace/community-operators-ftjbk" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.346231 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" 
start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:24 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:24 crc kubenswrapper[5014]: [+]process-running ok Oct 06 21:33:24 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.346302 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.346423 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c68ce8d8-f494-4971-8068-4fddf55fae97-catalog-content\") pod \"community-operators-ftjbk\" (UID: \"c68ce8d8-f494-4971-8068-4fddf55fae97\") " pod="openshift-marketplace/community-operators-ftjbk" Oct 06 21:33:24 crc kubenswrapper[5014]: E1006 21:33:24.346653 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:24.846638323 +0000 UTC m=+150.139675057 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.395668 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cfsf7"] Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.397379 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cfsf7" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.400638 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhjfp\" (UniqueName: \"kubernetes.io/projected/c68ce8d8-f494-4971-8068-4fddf55fae97-kube-api-access-nhjfp\") pod \"community-operators-ftjbk\" (UID: \"c68ce8d8-f494-4971-8068-4fddf55fae97\") " pod="openshift-marketplace/community-operators-ftjbk" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.408968 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cfsf7"] Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.452880 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.453282 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3393075-b0ae-43e5-9456-039b11d12c43-catalog-content\") pod \"community-operators-cfsf7\" (UID: \"e3393075-b0ae-43e5-9456-039b11d12c43\") " pod="openshift-marketplace/community-operators-cfsf7" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.453372 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3393075-b0ae-43e5-9456-039b11d12c43-utilities\") pod \"community-operators-cfsf7\" (UID: \"e3393075-b0ae-43e5-9456-039b11d12c43\") " pod="openshift-marketplace/community-operators-cfsf7" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.453534 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfd5h\" (UniqueName: \"kubernetes.io/projected/e3393075-b0ae-43e5-9456-039b11d12c43-kube-api-access-jfd5h\") pod \"community-operators-cfsf7\" (UID: \"e3393075-b0ae-43e5-9456-039b11d12c43\") " pod="openshift-marketplace/community-operators-cfsf7" Oct 06 21:33:24 crc kubenswrapper[5014]: E1006 21:33:24.453768 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:24.953748524 +0000 UTC m=+150.246785248 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.500960 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ftjbk" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.559685 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3393075-b0ae-43e5-9456-039b11d12c43-catalog-content\") pod \"community-operators-cfsf7\" (UID: \"e3393075-b0ae-43e5-9456-039b11d12c43\") " pod="openshift-marketplace/community-operators-cfsf7" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.559731 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3393075-b0ae-43e5-9456-039b11d12c43-utilities\") pod \"community-operators-cfsf7\" (UID: \"e3393075-b0ae-43e5-9456-039b11d12c43\") " pod="openshift-marketplace/community-operators-cfsf7" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.559777 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.559808 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfd5h\" (UniqueName: \"kubernetes.io/projected/e3393075-b0ae-43e5-9456-039b11d12c43-kube-api-access-jfd5h\") pod \"community-operators-cfsf7\" (UID: \"e3393075-b0ae-43e5-9456-039b11d12c43\") " pod="openshift-marketplace/community-operators-cfsf7" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.561055 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3393075-b0ae-43e5-9456-039b11d12c43-utilities\") pod \"community-operators-cfsf7\" (UID: \"e3393075-b0ae-43e5-9456-039b11d12c43\") " pod="openshift-marketplace/community-operators-cfsf7" Oct 06 21:33:24 crc kubenswrapper[5014]: E1006 21:33:24.561068 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:25.061044351 +0000 UTC m=+150.354081085 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.561123 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3393075-b0ae-43e5-9456-039b11d12c43-catalog-content\") pod \"community-operators-cfsf7\" (UID: \"e3393075-b0ae-43e5-9456-039b11d12c43\") " pod="openshift-marketplace/community-operators-cfsf7" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.582673 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfd5h\" (UniqueName: \"kubernetes.io/projected/e3393075-b0ae-43e5-9456-039b11d12c43-kube-api-access-jfd5h\") pod \"community-operators-cfsf7\" (UID: \"e3393075-b0ae-43e5-9456-039b11d12c43\") " pod="openshift-marketplace/community-operators-cfsf7" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.661113 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:24 crc kubenswrapper[5014]: E1006 21:33:24.661675 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:25.161650822 +0000 UTC m=+150.454687556 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.673503 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"bf41df296f5179b4b16c5724224b915d16ff1322437a410e508dfbf1030000ba"} Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.673546 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"638990cabd53c777d825a5e2fb06a4942cede13c53a26b06a76011bb8cf75811"} Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.674465 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.679591 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-k27df" event={"ID":"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036","Type":"ContainerStarted","Data":"4ff1b1c1dee734e964809d930ce805f851fd008586b3c97c6939f6c90c129f30"} Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.680762 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kfzpv" event={"ID":"942343e5-31c5-44bf-accb-42c83a176d0c","Type":"ContainerStarted","Data":"a3035f52e2c4799f1c09f9429364c3678e73d77433bde23c5e3b061e4118c9e2"} Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.683676 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"5dda1ac6bfa05ecad934a9c306dcec1561757738e6331c36bbf092cb4196c384"} Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.683716 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"bd3426947a9ee64b90d3ec57d6ea960af030a7226f5f1ed4f599158a462bae06"} Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.725161 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cfsf7" Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.763818 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:24 crc kubenswrapper[5014]: E1006 21:33:24.764976 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-10-06 21:33:25.264959844 +0000 UTC m=+150.557996648 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.868177 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:24 crc kubenswrapper[5014]: E1006 21:33:24.868316 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:25.368294817 +0000 UTC m=+150.661331551 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.868617 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:24 crc kubenswrapper[5014]: E1006 21:33:24.868916 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:25.368909098 +0000 UTC m=+150.661945832 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.903436 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ln2v9"] Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.970395 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:24 crc kubenswrapper[5014]: E1006 21:33:24.970900 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:25.470875025 +0000 UTC m=+150.763911759 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:24 crc kubenswrapper[5014]: I1006 21:33:24.981063 5014 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.069867 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ftjbk"] Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.072707 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:25 crc kubenswrapper[5014]: E1006 21:33:25.073093 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:25.573075199 +0000 UTC m=+150.866111933 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.073768 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cfsf7"] Oct 06 21:33:25 crc kubenswrapper[5014]: W1006 21:33:25.103696 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode3393075_b0ae_43e5_9456_039b11d12c43.slice/crio-6f2fdfb4d1c5da6bbdfb37d149c9130e02065fe4bdc035f68b9266fb6facd449 WatchSource:0}: Error finding container 6f2fdfb4d1c5da6bbdfb37d149c9130e02065fe4bdc035f68b9266fb6facd449: Status 404 returned error can't find the container with id 6f2fdfb4d1c5da6bbdfb37d149c9130e02065fe4bdc035f68b9266fb6facd449 Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.173747 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:25 crc kubenswrapper[5014]: E1006 21:33:25.173967 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:25.673928168 +0000 UTC m=+150.966964902 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.174318 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:25 crc kubenswrapper[5014]: E1006 21:33:25.174682 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:25.674669553 +0000 UTC m=+150.967706287 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.275780 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:25 crc kubenswrapper[5014]: E1006 21:33:25.275911 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:25.775892434 +0000 UTC m=+151.068929168 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.276067 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:25 crc kubenswrapper[5014]: E1006 21:33:25.276362 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:25.77635486 +0000 UTC m=+151.069391594 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.340505 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:25 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:25 crc kubenswrapper[5014]: [+]process-running ok Oct 06 21:33:25 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.340559 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.377421 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:25 crc kubenswrapper[5014]: E1006 21:33:25.377604 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:25.877573542 +0000 UTC m=+151.170610276 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.377791 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:25 crc kubenswrapper[5014]: E1006 21:33:25.378136 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:25.87811969 +0000 UTC m=+151.171156424 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.478746 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:25 crc kubenswrapper[5014]: E1006 21:33:25.478944 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:25.978920207 +0000 UTC m=+151.271956941 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.479001 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:25 crc kubenswrapper[5014]: E1006 21:33:25.479298 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:25.97928421 +0000 UTC m=+151.272320944 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.579853 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:25 crc kubenswrapper[5014]: E1006 21:33:25.580054 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:26.080036116 +0000 UTC m=+151.373072850 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.580121 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:25 crc kubenswrapper[5014]: E1006 21:33:25.580387 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-06 21:33:26.080380307 +0000 UTC m=+151.373417041 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-297xp" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.681923 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:25 crc kubenswrapper[5014]: E1006 21:33:25.682441 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-06 21:33:26.182409366 +0000 UTC m=+151.475446100 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.689871 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6dcxq" Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.694076 5014 generic.go:334] "Generic (PLEG): container finished" podID="942343e5-31c5-44bf-accb-42c83a176d0c" containerID="c7162139c0e120128611d52236f4c88a60631d9453ac9b088d7c3f3f3a885de7" exitCode=0 Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.694394 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kfzpv" event={"ID":"942343e5-31c5-44bf-accb-42c83a176d0c","Type":"ContainerDied","Data":"c7162139c0e120128611d52236f4c88a60631d9453ac9b088d7c3f3f3a885de7"} Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.697137 5014 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.700887 5014 generic.go:334] "Generic (PLEG): container finished" podID="e3393075-b0ae-43e5-9456-039b11d12c43" containerID="bdc0ca8bbb7b8cf496dd861769d192cb8c93411b3b075ab203fa37156c2eed18" exitCode=0 Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.701002 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfsf7" event={"ID":"e3393075-b0ae-43e5-9456-039b11d12c43","Type":"ContainerDied","Data":"bdc0ca8bbb7b8cf496dd861769d192cb8c93411b3b075ab203fa37156c2eed18"} Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.701059 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfsf7" event={"ID":"e3393075-b0ae-43e5-9456-039b11d12c43","Type":"ContainerStarted","Data":"6f2fdfb4d1c5da6bbdfb37d149c9130e02065fe4bdc035f68b9266fb6facd449"} 
Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.703597 5014 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-10-06T21:33:24.9810986Z","Handler":null,"Name":""} Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.716011 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb" event={"ID":"1e659c79-33c6-49d3-a333-5280ece9fa5b","Type":"ContainerDied","Data":"9a7321f375cc06c6f04b42cc9c645e450c3b940eb3b374323d4bb7feb8985a37"} Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.715946 5014 generic.go:334] "Generic (PLEG): container finished" podID="1e659c79-33c6-49d3-a333-5280ece9fa5b" containerID="9a7321f375cc06c6f04b42cc9c645e450c3b940eb3b374323d4bb7feb8985a37" exitCode=0 Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.729663 5014 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.729719 5014 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.730424 5014 generic.go:334] "Generic (PLEG): container finished" podID="ef4fc2ec-e7f2-490d-95bb-8f13a71153ef" containerID="6d808a1a6540e6851ebc43f9fb6613fa3358bd652d7bd5bfe51d612fde943863" exitCode=0 Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.730516 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ln2v9" event={"ID":"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef","Type":"ContainerDied","Data":"6d808a1a6540e6851ebc43f9fb6613fa3358bd652d7bd5bfe51d612fde943863"} Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.730553 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ln2v9" event={"ID":"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef","Type":"ContainerStarted","Data":"f4716888c81d33c1916775b691ba30a594f715549ae8eeff4310a4c6b5f996e2"} Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.744299 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-k27df" event={"ID":"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036","Type":"ContainerStarted","Data":"bf155f787e84c02ca0ba0e215b14b8d9443979a503533c812df1777a1d21539c"} Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.744360 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-k27df" event={"ID":"09eaec7e-dd40-43f9-b1dd-4ef2de9b0036","Type":"ContainerStarted","Data":"02aff615659503955def593c506b9a7eb4996f68bed2f98b2666b1c67b637e0e"} Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.756091 5014 generic.go:334] "Generic (PLEG): container finished" podID="c68ce8d8-f494-4971-8068-4fddf55fae97" containerID="1840f363ead25ce3fd9b5d47c5abd33013c6cbc5a26ec78abbcba64012de3e38" exitCode=0 Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.756891 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ftjbk" event={"ID":"c68ce8d8-f494-4971-8068-4fddf55fae97","Type":"ContainerDied","Data":"1840f363ead25ce3fd9b5d47c5abd33013c6cbc5a26ec78abbcba64012de3e38"} Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 
21:33:25.756929 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ftjbk" event={"ID":"c68ce8d8-f494-4971-8068-4fddf55fae97","Type":"ContainerStarted","Data":"c5bb11604f3bf73e68c4648d746bc6b791fbfdd14066e918555bb9b2cd87f543"} Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.783373 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.788037 5014 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.788079 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.830418 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-297xp\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") " pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.866185 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-k27df" podStartSLOduration=11.866168127 podStartE2EDuration="11.866168127s" podCreationTimestamp="2025-10-06 21:33:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:25.863204867 +0000 UTC m=+151.156241601" watchObservedRunningTime="2025-10-06 21:33:25.866168127 +0000 UTC m=+151.159204861" Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.870241 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.886900 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.899393 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.977815 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5wc2h"] Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.979669 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5wc2h" Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.982159 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Oct 06 21:33:25 crc kubenswrapper[5014]: I1006 21:33:25.988748 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5wc2h"] Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.083493 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-297xp"] Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.095807 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9af08180-3fb3-439d-b8cb-7b65f03c0413-catalog-content\") pod \"redhat-marketplace-5wc2h\" (UID: \"9af08180-3fb3-439d-b8cb-7b65f03c0413\") " pod="openshift-marketplace/redhat-marketplace-5wc2h" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.095876 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9af08180-3fb3-439d-b8cb-7b65f03c0413-utilities\") pod \"redhat-marketplace-5wc2h\" (UID: \"9af08180-3fb3-439d-b8cb-7b65f03c0413\") " pod="openshift-marketplace/redhat-marketplace-5wc2h" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.095997 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6n2g\" (UniqueName: \"kubernetes.io/projected/9af08180-3fb3-439d-b8cb-7b65f03c0413-kube-api-access-l6n2g\") pod \"redhat-marketplace-5wc2h\" (UID: \"9af08180-3fb3-439d-b8cb-7b65f03c0413\") " pod="openshift-marketplace/redhat-marketplace-5wc2h" Oct 06 21:33:26 crc kubenswrapper[5014]: W1006 21:33:26.100951 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3693c72b_1c6a_4362_bda2_6d5ea365cd38.slice/crio-8a39b81de1a634092fda18f8d73d6824e1c7af3670894d6e5bc08b26f4f135f7 WatchSource:0}: Error finding container 8a39b81de1a634092fda18f8d73d6824e1c7af3670894d6e5bc08b26f4f135f7: 
Status 404 returned error can't find the container with id 8a39b81de1a634092fda18f8d73d6824e1c7af3670894d6e5bc08b26f4f135f7 Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.197215 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6n2g\" (UniqueName: \"kubernetes.io/projected/9af08180-3fb3-439d-b8cb-7b65f03c0413-kube-api-access-l6n2g\") pod \"redhat-marketplace-5wc2h\" (UID: \"9af08180-3fb3-439d-b8cb-7b65f03c0413\") " pod="openshift-marketplace/redhat-marketplace-5wc2h" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.197269 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9af08180-3fb3-439d-b8cb-7b65f03c0413-catalog-content\") pod \"redhat-marketplace-5wc2h\" (UID: \"9af08180-3fb3-439d-b8cb-7b65f03c0413\") " pod="openshift-marketplace/redhat-marketplace-5wc2h" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.197293 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9af08180-3fb3-439d-b8cb-7b65f03c0413-utilities\") pod \"redhat-marketplace-5wc2h\" (UID: \"9af08180-3fb3-439d-b8cb-7b65f03c0413\") " pod="openshift-marketplace/redhat-marketplace-5wc2h" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.197837 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9af08180-3fb3-439d-b8cb-7b65f03c0413-utilities\") pod \"redhat-marketplace-5wc2h\" (UID: \"9af08180-3fb3-439d-b8cb-7b65f03c0413\") " pod="openshift-marketplace/redhat-marketplace-5wc2h" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.198951 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9af08180-3fb3-439d-b8cb-7b65f03c0413-catalog-content\") pod \"redhat-marketplace-5wc2h\" (UID: \"9af08180-3fb3-439d-b8cb-7b65f03c0413\") " pod="openshift-marketplace/redhat-marketplace-5wc2h" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.217652 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6n2g\" (UniqueName: \"kubernetes.io/projected/9af08180-3fb3-439d-b8cb-7b65f03c0413-kube-api-access-l6n2g\") pod \"redhat-marketplace-5wc2h\" (UID: \"9af08180-3fb3-439d-b8cb-7b65f03c0413\") " pod="openshift-marketplace/redhat-marketplace-5wc2h" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.265558 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.297742 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-lts6v" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.310956 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5wc2h" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.337690 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-s45dn" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.342357 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:26 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:26 crc kubenswrapper[5014]: [+]process-running ok Oct 06 21:33:26 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.342487 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.380948 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-l926w"] Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.382657 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l926w" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.398700 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l926w"] Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.501314 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5ad6e3f-cae5-465c-9744-abc1f63afd99-utilities\") pod \"redhat-marketplace-l926w\" (UID: \"a5ad6e3f-cae5-465c-9744-abc1f63afd99\") " pod="openshift-marketplace/redhat-marketplace-l926w" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.501370 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5ad6e3f-cae5-465c-9744-abc1f63afd99-catalog-content\") pod \"redhat-marketplace-l926w\" (UID: \"a5ad6e3f-cae5-465c-9744-abc1f63afd99\") " pod="openshift-marketplace/redhat-marketplace-l926w" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.501535 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftprd\" (UniqueName: \"kubernetes.io/projected/a5ad6e3f-cae5-465c-9744-abc1f63afd99-kube-api-access-ftprd\") pod \"redhat-marketplace-l926w\" (UID: \"a5ad6e3f-cae5-465c-9744-abc1f63afd99\") " pod="openshift-marketplace/redhat-marketplace-l926w" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.602803 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftprd\" (UniqueName: \"kubernetes.io/projected/a5ad6e3f-cae5-465c-9744-abc1f63afd99-kube-api-access-ftprd\") pod \"redhat-marketplace-l926w\" (UID: \"a5ad6e3f-cae5-465c-9744-abc1f63afd99\") " pod="openshift-marketplace/redhat-marketplace-l926w" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.603389 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5ad6e3f-cae5-465c-9744-abc1f63afd99-utilities\") pod 
\"redhat-marketplace-l926w\" (UID: \"a5ad6e3f-cae5-465c-9744-abc1f63afd99\") " pod="openshift-marketplace/redhat-marketplace-l926w" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.603443 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5ad6e3f-cae5-465c-9744-abc1f63afd99-catalog-content\") pod \"redhat-marketplace-l926w\" (UID: \"a5ad6e3f-cae5-465c-9744-abc1f63afd99\") " pod="openshift-marketplace/redhat-marketplace-l926w" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.604459 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5ad6e3f-cae5-465c-9744-abc1f63afd99-catalog-content\") pod \"redhat-marketplace-l926w\" (UID: \"a5ad6e3f-cae5-465c-9744-abc1f63afd99\") " pod="openshift-marketplace/redhat-marketplace-l926w" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.604679 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5ad6e3f-cae5-465c-9744-abc1f63afd99-utilities\") pod \"redhat-marketplace-l926w\" (UID: \"a5ad6e3f-cae5-465c-9744-abc1f63afd99\") " pod="openshift-marketplace/redhat-marketplace-l926w" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.656573 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.656637 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.661876 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftprd\" (UniqueName: \"kubernetes.io/projected/a5ad6e3f-cae5-465c-9744-abc1f63afd99-kube-api-access-ftprd\") pod \"redhat-marketplace-l926w\" (UID: \"a5ad6e3f-cae5-465c-9744-abc1f63afd99\") " pod="openshift-marketplace/redhat-marketplace-l926w" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.700174 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.718244 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l926w" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.780947 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.780985 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.782288 5014 patch_prober.go:28] interesting pod/console-f9d7485db-kb2p5 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.21:8443/health\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.782322 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-kb2p5" podUID="43df857e-f7f9-45e8-97e7-21adc3167678" containerName="console" probeResult="failure" output="Get \"https://10.217.0.21:8443/health\": dial tcp 10.217.0.21:8443: connect: connection refused" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.807448 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-297xp" event={"ID":"3693c72b-1c6a-4362-bda2-6d5ea365cd38","Type":"ContainerStarted","Data":"b3c9ab86406d8447ab28f7bbb8394d5351290adec3fb30215d866f8dcf4baea6"} Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.807482 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-297xp" event={"ID":"3693c72b-1c6a-4362-bda2-6d5ea365cd38","Type":"ContainerStarted","Data":"8a39b81de1a634092fda18f8d73d6824e1c7af3670894d6e5bc08b26f4f135f7"} Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.807987 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.824061 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-vdpkd" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.836872 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-297xp" podStartSLOduration=130.836850678 podStartE2EDuration="2m10.836850678s" podCreationTimestamp="2025-10-06 21:31:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:26.833690362 +0000 UTC m=+152.126727116" watchObservedRunningTime="2025-10-06 21:33:26.836850678 +0000 UTC m=+152.129887412" Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.851285 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5wc2h"] Oct 06 21:33:26 crc kubenswrapper[5014]: W1006 21:33:26.896838 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9af08180_3fb3_439d_b8cb_7b65f03c0413.slice/crio-a97d323a3cec8637212efbeb72fce8bbcdc30fe7250ea610b3480ab9ce94f948 WatchSource:0}: Error finding container a97d323a3cec8637212efbeb72fce8bbcdc30fe7250ea610b3480ab9ce94f948: Status 404 returned error can't find the container with id a97d323a3cec8637212efbeb72fce8bbcdc30fe7250ea610b3480ab9ce94f948 Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.990461 5014 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cgv2z"] Oct 06 21:33:26 crc kubenswrapper[5014]: I1006 21:33:26.992319 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cgv2z" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.001058 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.007275 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cgv2z"] Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.014747 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9557159b-e76f-4958-8d67-87c9da20b9ac-catalog-content\") pod \"redhat-operators-cgv2z\" (UID: \"9557159b-e76f-4958-8d67-87c9da20b9ac\") " pod="openshift-marketplace/redhat-operators-cgv2z" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.014795 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wzpg\" (UniqueName: \"kubernetes.io/projected/9557159b-e76f-4958-8d67-87c9da20b9ac-kube-api-access-5wzpg\") pod \"redhat-operators-cgv2z\" (UID: \"9557159b-e76f-4958-8d67-87c9da20b9ac\") " pod="openshift-marketplace/redhat-operators-cgv2z" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.014880 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9557159b-e76f-4958-8d67-87c9da20b9ac-utilities\") pod \"redhat-operators-cgv2z\" (UID: \"9557159b-e76f-4958-8d67-87c9da20b9ac\") " pod="openshift-marketplace/redhat-operators-cgv2z" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.115953 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9557159b-e76f-4958-8d67-87c9da20b9ac-utilities\") pod \"redhat-operators-cgv2z\" (UID: \"9557159b-e76f-4958-8d67-87c9da20b9ac\") " pod="openshift-marketplace/redhat-operators-cgv2z" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.116021 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9557159b-e76f-4958-8d67-87c9da20b9ac-catalog-content\") pod \"redhat-operators-cgv2z\" (UID: \"9557159b-e76f-4958-8d67-87c9da20b9ac\") " pod="openshift-marketplace/redhat-operators-cgv2z" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.116044 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wzpg\" (UniqueName: \"kubernetes.io/projected/9557159b-e76f-4958-8d67-87c9da20b9ac-kube-api-access-5wzpg\") pod \"redhat-operators-cgv2z\" (UID: \"9557159b-e76f-4958-8d67-87c9da20b9ac\") " pod="openshift-marketplace/redhat-operators-cgv2z" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.116743 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9557159b-e76f-4958-8d67-87c9da20b9ac-utilities\") pod \"redhat-operators-cgv2z\" (UID: \"9557159b-e76f-4958-8d67-87c9da20b9ac\") " pod="openshift-marketplace/redhat-operators-cgv2z" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.116964 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9557159b-e76f-4958-8d67-87c9da20b9ac-catalog-content\") pod \"redhat-operators-cgv2z\" (UID: \"9557159b-e76f-4958-8d67-87c9da20b9ac\") " pod="openshift-marketplace/redhat-operators-cgv2z" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.142845 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wzpg\" (UniqueName: \"kubernetes.io/projected/9557159b-e76f-4958-8d67-87c9da20b9ac-kube-api-access-5wzpg\") pod \"redhat-operators-cgv2z\" (UID: \"9557159b-e76f-4958-8d67-87c9da20b9ac\") " pod="openshift-marketplace/redhat-operators-cgv2z" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.177757 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.179116 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.182037 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.187987 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.191565 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.216955 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/5405c6a7-b9df-49ca-ba22-728e44178950-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"5405c6a7-b9df-49ca-ba22-728e44178950\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.217330 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5405c6a7-b9df-49ca-ba22-728e44178950-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"5405c6a7-b9df-49ca-ba22-728e44178950\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.311609 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.314984 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cgv2z" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.318313 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/5405c6a7-b9df-49ca-ba22-728e44178950-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"5405c6a7-b9df-49ca-ba22-728e44178950\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.318360 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5405c6a7-b9df-49ca-ba22-728e44178950-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"5405c6a7-b9df-49ca-ba22-728e44178950\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.318529 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/5405c6a7-b9df-49ca-ba22-728e44178950-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"5405c6a7-b9df-49ca-ba22-728e44178950\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.337233 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5405c6a7-b9df-49ca-ba22-728e44178950-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"5405c6a7-b9df-49ca-ba22-728e44178950\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.342733 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:27 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:27 crc kubenswrapper[5014]: [+]process-running ok Oct 06 21:33:27 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.342818 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.380509 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7xdvs"] Oct 06 21:33:27 crc kubenswrapper[5014]: E1006 21:33:27.380809 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e659c79-33c6-49d3-a333-5280ece9fa5b" containerName="collect-profiles" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.380832 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e659c79-33c6-49d3-a333-5280ece9fa5b" containerName="collect-profiles" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.380929 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e659c79-33c6-49d3-a333-5280ece9fa5b" containerName="collect-profiles" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.381767 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7xdvs" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.387461 5014 patch_prober.go:28] interesting pod/downloads-7954f5f757-zh4s7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.387510 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zh4s7" podUID="2bb77525-303a-4691-81d2-0bbeb6eeed9c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.387461 5014 patch_prober.go:28] interesting pod/downloads-7954f5f757-zh4s7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.387710 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-zh4s7" podUID="2bb77525-303a-4691-81d2-0bbeb6eeed9c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.392608 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7xdvs"] Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.422400 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e659c79-33c6-49d3-a333-5280ece9fa5b-config-volume\") pod \"1e659c79-33c6-49d3-a333-5280ece9fa5b\" (UID: \"1e659c79-33c6-49d3-a333-5280ece9fa5b\") " Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.422453 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1e659c79-33c6-49d3-a333-5280ece9fa5b-secret-volume\") pod \"1e659c79-33c6-49d3-a333-5280ece9fa5b\" (UID: \"1e659c79-33c6-49d3-a333-5280ece9fa5b\") " Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.422534 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6wrq\" (UniqueName: \"kubernetes.io/projected/1e659c79-33c6-49d3-a333-5280ece9fa5b-kube-api-access-k6wrq\") pod \"1e659c79-33c6-49d3-a333-5280ece9fa5b\" (UID: \"1e659c79-33c6-49d3-a333-5280ece9fa5b\") " Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.422774 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49f3778d-0532-4fa4-9635-500e98f8b8e9-utilities\") pod \"redhat-operators-7xdvs\" (UID: \"49f3778d-0532-4fa4-9635-500e98f8b8e9\") " pod="openshift-marketplace/redhat-operators-7xdvs" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.422845 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcfk5\" (UniqueName: \"kubernetes.io/projected/49f3778d-0532-4fa4-9635-500e98f8b8e9-kube-api-access-mcfk5\") pod \"redhat-operators-7xdvs\" (UID: \"49f3778d-0532-4fa4-9635-500e98f8b8e9\") " pod="openshift-marketplace/redhat-operators-7xdvs" Oct 06 21:33:27 crc 
kubenswrapper[5014]: I1006 21:33:27.422893 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49f3778d-0532-4fa4-9635-500e98f8b8e9-catalog-content\") pod \"redhat-operators-7xdvs\" (UID: \"49f3778d-0532-4fa4-9635-500e98f8b8e9\") " pod="openshift-marketplace/redhat-operators-7xdvs" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.424384 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e659c79-33c6-49d3-a333-5280ece9fa5b-config-volume" (OuterVolumeSpecName: "config-volume") pod "1e659c79-33c6-49d3-a333-5280ece9fa5b" (UID: "1e659c79-33c6-49d3-a333-5280ece9fa5b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.427815 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e659c79-33c6-49d3-a333-5280ece9fa5b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1e659c79-33c6-49d3-a333-5280ece9fa5b" (UID: "1e659c79-33c6-49d3-a333-5280ece9fa5b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.433976 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e659c79-33c6-49d3-a333-5280ece9fa5b-kube-api-access-k6wrq" (OuterVolumeSpecName: "kube-api-access-k6wrq") pod "1e659c79-33c6-49d3-a333-5280ece9fa5b" (UID: "1e659c79-33c6-49d3-a333-5280ece9fa5b"). InnerVolumeSpecName "kube-api-access-k6wrq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.501791 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.524427 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l926w"] Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.525120 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49f3778d-0532-4fa4-9635-500e98f8b8e9-catalog-content\") pod \"redhat-operators-7xdvs\" (UID: \"49f3778d-0532-4fa4-9635-500e98f8b8e9\") " pod="openshift-marketplace/redhat-operators-7xdvs" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.525192 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49f3778d-0532-4fa4-9635-500e98f8b8e9-utilities\") pod \"redhat-operators-7xdvs\" (UID: \"49f3778d-0532-4fa4-9635-500e98f8b8e9\") " pod="openshift-marketplace/redhat-operators-7xdvs" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.525262 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcfk5\" (UniqueName: \"kubernetes.io/projected/49f3778d-0532-4fa4-9635-500e98f8b8e9-kube-api-access-mcfk5\") pod \"redhat-operators-7xdvs\" (UID: \"49f3778d-0532-4fa4-9635-500e98f8b8e9\") " pod="openshift-marketplace/redhat-operators-7xdvs" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.525338 5014 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1e659c79-33c6-49d3-a333-5280ece9fa5b-config-volume\") 
on node \"crc\" DevicePath \"\"" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.525351 5014 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1e659c79-33c6-49d3-a333-5280ece9fa5b-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.525366 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6wrq\" (UniqueName: \"kubernetes.io/projected/1e659c79-33c6-49d3-a333-5280ece9fa5b-kube-api-access-k6wrq\") on node \"crc\" DevicePath \"\"" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.528997 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49f3778d-0532-4fa4-9635-500e98f8b8e9-catalog-content\") pod \"redhat-operators-7xdvs\" (UID: \"49f3778d-0532-4fa4-9635-500e98f8b8e9\") " pod="openshift-marketplace/redhat-operators-7xdvs" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.532141 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49f3778d-0532-4fa4-9635-500e98f8b8e9-utilities\") pod \"redhat-operators-7xdvs\" (UID: \"49f3778d-0532-4fa4-9635-500e98f8b8e9\") " pod="openshift-marketplace/redhat-operators-7xdvs" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.555738 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcfk5\" (UniqueName: \"kubernetes.io/projected/49f3778d-0532-4fa4-9635-500e98f8b8e9-kube-api-access-mcfk5\") pod \"redhat-operators-7xdvs\" (UID: \"49f3778d-0532-4fa4-9635-500e98f8b8e9\") " pod="openshift-marketplace/redhat-operators-7xdvs" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.558072 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.714249 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7xdvs" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.768544 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cgv2z"] Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.853242 5014 generic.go:334] "Generic (PLEG): container finished" podID="9af08180-3fb3-439d-b8cb-7b65f03c0413" containerID="830819e70c69de62dc8a5490c09f47ffac6eae89343f26e51219874da8dc3a55" exitCode=0 Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.853348 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5wc2h" event={"ID":"9af08180-3fb3-439d-b8cb-7b65f03c0413","Type":"ContainerDied","Data":"830819e70c69de62dc8a5490c09f47ffac6eae89343f26e51219874da8dc3a55"} Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.853378 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5wc2h" event={"ID":"9af08180-3fb3-439d-b8cb-7b65f03c0413","Type":"ContainerStarted","Data":"a97d323a3cec8637212efbeb72fce8bbcdc30fe7250ea610b3480ab9ce94f948"} Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.863535 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l926w" event={"ID":"a5ad6e3f-cae5-465c-9744-abc1f63afd99","Type":"ContainerStarted","Data":"35690bd7fc793d01fb1ddb4211cce4a7546cf5844409770bfa34b712c499c7ce"} Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.866025 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cgv2z" event={"ID":"9557159b-e76f-4958-8d67-87c9da20b9ac","Type":"ContainerStarted","Data":"043fca04a620c08076105b12f5b1694cd14d45e2c83e101501e6df474b5ce358"} Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.870857 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb" Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.871237 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb" event={"ID":"1e659c79-33c6-49d3-a333-5280ece9fa5b","Type":"ContainerDied","Data":"ceb6aa21b7f3224f948339989163f54bd7b5362a081c015f33ea738fed0d16f1"} Oct 06 21:33:27 crc kubenswrapper[5014]: I1006 21:33:27.871256 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ceb6aa21b7f3224f948339989163f54bd7b5362a081c015f33ea738fed0d16f1" Oct 06 21:33:28 crc kubenswrapper[5014]: I1006 21:33:28.101134 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Oct 06 21:33:28 crc kubenswrapper[5014]: I1006 21:33:28.150332 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7xdvs"] Oct 06 21:33:28 crc kubenswrapper[5014]: W1006 21:33:28.179012 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod49f3778d_0532_4fa4_9635_500e98f8b8e9.slice/crio-47cb5a9dc0ab5fff97c848d718e3eb4006452b78c7702289891a26efd8bd3833 WatchSource:0}: Error finding container 47cb5a9dc0ab5fff97c848d718e3eb4006452b78c7702289891a26efd8bd3833: Status 404 returned error can't find the container with id 47cb5a9dc0ab5fff97c848d718e3eb4006452b78c7702289891a26efd8bd3833 Oct 06 21:33:28 crc kubenswrapper[5014]: I1006 21:33:28.341400 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:28 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:28 crc kubenswrapper[5014]: [+]process-running ok Oct 06 21:33:28 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:28 crc kubenswrapper[5014]: I1006 21:33:28.341472 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:28 crc kubenswrapper[5014]: I1006 21:33:28.750557 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-97wsg" Oct 06 21:33:28 crc kubenswrapper[5014]: I1006 21:33:28.887670 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"5405c6a7-b9df-49ca-ba22-728e44178950","Type":"ContainerStarted","Data":"97c97d32e0fc05fc677fd8b496fb6371ab6c2c53545d945efa6b62a052b870f1"} Oct 06 21:33:28 crc kubenswrapper[5014]: I1006 21:33:28.887725 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"5405c6a7-b9df-49ca-ba22-728e44178950","Type":"ContainerStarted","Data":"c29cf4bbc84b2d55cbf38dd589d6689f0d95cf7715337da4cabd33639cf6fe56"} Oct 06 21:33:28 crc kubenswrapper[5014]: I1006 21:33:28.891478 5014 generic.go:334] "Generic (PLEG): container finished" podID="49f3778d-0532-4fa4-9635-500e98f8b8e9" containerID="43231e83e0e2ebc7a9c08b4a9414c0acf9199208da4ba5a53b2a672809e2d927" exitCode=0 Oct 06 21:33:28 crc kubenswrapper[5014]: I1006 21:33:28.891519 5014 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xdvs" event={"ID":"49f3778d-0532-4fa4-9635-500e98f8b8e9","Type":"ContainerDied","Data":"43231e83e0e2ebc7a9c08b4a9414c0acf9199208da4ba5a53b2a672809e2d927"} Oct 06 21:33:28 crc kubenswrapper[5014]: I1006 21:33:28.891535 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xdvs" event={"ID":"49f3778d-0532-4fa4-9635-500e98f8b8e9","Type":"ContainerStarted","Data":"47cb5a9dc0ab5fff97c848d718e3eb4006452b78c7702289891a26efd8bd3833"} Oct 06 21:33:28 crc kubenswrapper[5014]: I1006 21:33:28.907353 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=1.907335706 podStartE2EDuration="1.907335706s" podCreationTimestamp="2025-10-06 21:33:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:28.90627898 +0000 UTC m=+154.199315714" watchObservedRunningTime="2025-10-06 21:33:28.907335706 +0000 UTC m=+154.200372430" Oct 06 21:33:28 crc kubenswrapper[5014]: I1006 21:33:28.950118 5014 generic.go:334] "Generic (PLEG): container finished" podID="a5ad6e3f-cae5-465c-9744-abc1f63afd99" containerID="fdd618b338fbe46dc1a36cde9f496e82fba0546601bba2997438785091fcb11e" exitCode=0 Oct 06 21:33:28 crc kubenswrapper[5014]: I1006 21:33:28.950214 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l926w" event={"ID":"a5ad6e3f-cae5-465c-9744-abc1f63afd99","Type":"ContainerDied","Data":"fdd618b338fbe46dc1a36cde9f496e82fba0546601bba2997438785091fcb11e"} Oct 06 21:33:28 crc kubenswrapper[5014]: I1006 21:33:28.962407 5014 generic.go:334] "Generic (PLEG): container finished" podID="9557159b-e76f-4958-8d67-87c9da20b9ac" containerID="26ea44509b670ee6f169ff43f00ff10a37fe0a55018293a183a844289d0c83ec" exitCode=0 Oct 06 21:33:28 crc kubenswrapper[5014]: I1006 21:33:28.962694 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cgv2z" event={"ID":"9557159b-e76f-4958-8d67-87c9da20b9ac","Type":"ContainerDied","Data":"26ea44509b670ee6f169ff43f00ff10a37fe0a55018293a183a844289d0c83ec"} Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.341049 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:29 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:29 crc kubenswrapper[5014]: [+]process-running ok Oct 06 21:33:29 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.341105 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.423178 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.423842 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.426327 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.426395 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.478851 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.567458 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/14817aec-b036-4806-9db0-4680b1bfadc1-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"14817aec-b036-4806-9db0-4680b1bfadc1\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.567533 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14817aec-b036-4806-9db0-4680b1bfadc1-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"14817aec-b036-4806-9db0-4680b1bfadc1\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.672188 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/14817aec-b036-4806-9db0-4680b1bfadc1-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"14817aec-b036-4806-9db0-4680b1bfadc1\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.672253 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14817aec-b036-4806-9db0-4680b1bfadc1-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"14817aec-b036-4806-9db0-4680b1bfadc1\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.672802 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/14817aec-b036-4806-9db0-4680b1bfadc1-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"14817aec-b036-4806-9db0-4680b1bfadc1\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.697959 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14817aec-b036-4806-9db0-4680b1bfadc1-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"14817aec-b036-4806-9db0-4680b1bfadc1\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.800027 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.972629 5014 generic.go:334] "Generic (PLEG): container finished" podID="5405c6a7-b9df-49ca-ba22-728e44178950" containerID="97c97d32e0fc05fc677fd8b496fb6371ab6c2c53545d945efa6b62a052b870f1" exitCode=0 Oct 06 21:33:29 crc kubenswrapper[5014]: I1006 21:33:29.972673 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"5405c6a7-b9df-49ca-ba22-728e44178950","Type":"ContainerDied","Data":"97c97d32e0fc05fc677fd8b496fb6371ab6c2c53545d945efa6b62a052b870f1"} Oct 06 21:33:30 crc kubenswrapper[5014]: I1006 21:33:30.341098 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:30 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:30 crc kubenswrapper[5014]: [+]process-running ok Oct 06 21:33:30 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:30 crc kubenswrapper[5014]: I1006 21:33:30.341861 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:30 crc kubenswrapper[5014]: I1006 21:33:30.382022 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Oct 06 21:33:30 crc kubenswrapper[5014]: W1006 21:33:30.460392 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod14817aec_b036_4806_9db0_4680b1bfadc1.slice/crio-00bd2beb0ede7aeafc39890e77c388206b1de4c1f6416df716b9f5bbc569234f WatchSource:0}: Error finding container 00bd2beb0ede7aeafc39890e77c388206b1de4c1f6416df716b9f5bbc569234f: Status 404 returned error can't find the container with id 00bd2beb0ede7aeafc39890e77c388206b1de4c1f6416df716b9f5bbc569234f Oct 06 21:33:31 crc kubenswrapper[5014]: I1006 21:33:31.013663 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"14817aec-b036-4806-9db0-4680b1bfadc1","Type":"ContainerStarted","Data":"00bd2beb0ede7aeafc39890e77c388206b1de4c1f6416df716b9f5bbc569234f"} Oct 06 21:33:31 crc kubenswrapper[5014]: I1006 21:33:31.341209 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:31 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:31 crc kubenswrapper[5014]: [+]process-running ok Oct 06 21:33:31 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:31 crc kubenswrapper[5014]: I1006 21:33:31.341275 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:31 crc kubenswrapper[5014]: I1006 21:33:31.549405 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 06 21:33:31 crc kubenswrapper[5014]: I1006 21:33:31.717961 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/5405c6a7-b9df-49ca-ba22-728e44178950-kubelet-dir\") pod \"5405c6a7-b9df-49ca-ba22-728e44178950\" (UID: \"5405c6a7-b9df-49ca-ba22-728e44178950\") " Oct 06 21:33:31 crc kubenswrapper[5014]: I1006 21:33:31.718045 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5405c6a7-b9df-49ca-ba22-728e44178950-kube-api-access\") pod \"5405c6a7-b9df-49ca-ba22-728e44178950\" (UID: \"5405c6a7-b9df-49ca-ba22-728e44178950\") " Oct 06 21:33:31 crc kubenswrapper[5014]: I1006 21:33:31.718084 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5405c6a7-b9df-49ca-ba22-728e44178950-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "5405c6a7-b9df-49ca-ba22-728e44178950" (UID: "5405c6a7-b9df-49ca-ba22-728e44178950"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:33:31 crc kubenswrapper[5014]: I1006 21:33:31.718325 5014 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/5405c6a7-b9df-49ca-ba22-728e44178950-kubelet-dir\") on node \"crc\" DevicePath \"\"" Oct 06 21:33:31 crc kubenswrapper[5014]: I1006 21:33:31.725769 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5405c6a7-b9df-49ca-ba22-728e44178950-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "5405c6a7-b9df-49ca-ba22-728e44178950" (UID: "5405c6a7-b9df-49ca-ba22-728e44178950"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:33:31 crc kubenswrapper[5014]: I1006 21:33:31.819884 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5405c6a7-b9df-49ca-ba22-728e44178950-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 06 21:33:32 crc kubenswrapper[5014]: I1006 21:33:32.047385 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"14817aec-b036-4806-9db0-4680b1bfadc1","Type":"ContainerStarted","Data":"33246a1fc78d2368155c42be6a724beeab71ed7b68a0e6c2d63b5f505417b093"} Oct 06 21:33:32 crc kubenswrapper[5014]: I1006 21:33:32.053018 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"5405c6a7-b9df-49ca-ba22-728e44178950","Type":"ContainerDied","Data":"c29cf4bbc84b2d55cbf38dd589d6689f0d95cf7715337da4cabd33639cf6fe56"} Oct 06 21:33:32 crc kubenswrapper[5014]: I1006 21:33:32.053046 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 06 21:33:32 crc kubenswrapper[5014]: I1006 21:33:32.053061 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c29cf4bbc84b2d55cbf38dd589d6689f0d95cf7715337da4cabd33639cf6fe56" Oct 06 21:33:32 crc kubenswrapper[5014]: I1006 21:33:32.077648 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.077628679 podStartE2EDuration="3.077628679s" podCreationTimestamp="2025-10-06 21:33:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:33:32.06997601 +0000 UTC m=+157.363012754" watchObservedRunningTime="2025-10-06 21:33:32.077628679 +0000 UTC m=+157.370665413" Oct 06 21:33:32 crc kubenswrapper[5014]: I1006 21:33:32.340773 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:32 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:32 crc kubenswrapper[5014]: [+]process-running ok Oct 06 21:33:32 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:32 crc kubenswrapper[5014]: I1006 21:33:32.340846 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:33 crc kubenswrapper[5014]: I1006 21:33:33.080811 5014 generic.go:334] "Generic (PLEG): container finished" podID="14817aec-b036-4806-9db0-4680b1bfadc1" containerID="33246a1fc78d2368155c42be6a724beeab71ed7b68a0e6c2d63b5f505417b093" exitCode=0 Oct 06 21:33:33 crc kubenswrapper[5014]: I1006 21:33:33.080924 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"14817aec-b036-4806-9db0-4680b1bfadc1","Type":"ContainerDied","Data":"33246a1fc78d2368155c42be6a724beeab71ed7b68a0e6c2d63b5f505417b093"} Oct 06 21:33:33 crc kubenswrapper[5014]: I1006 21:33:33.343182 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:33 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:33 crc kubenswrapper[5014]: [+]process-running ok Oct 06 21:33:33 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:33 crc kubenswrapper[5014]: I1006 21:33:33.343248 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:34 crc kubenswrapper[5014]: I1006 21:33:34.343664 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:34 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:34 crc kubenswrapper[5014]: 
[+]process-running ok Oct 06 21:33:34 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:34 crc kubenswrapper[5014]: I1006 21:33:34.343737 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:35 crc kubenswrapper[5014]: I1006 21:33:35.340335 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:35 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:35 crc kubenswrapper[5014]: [+]process-running ok Oct 06 21:33:35 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:35 crc kubenswrapper[5014]: I1006 21:33:35.340707 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:36 crc kubenswrapper[5014]: I1006 21:33:36.339943 5014 patch_prober.go:28] interesting pod/router-default-5444994796-s45dn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 06 21:33:36 crc kubenswrapper[5014]: [-]has-synced failed: reason withheld Oct 06 21:33:36 crc kubenswrapper[5014]: [+]process-running ok Oct 06 21:33:36 crc kubenswrapper[5014]: healthz check failed Oct 06 21:33:36 crc kubenswrapper[5014]: I1006 21:33:36.340259 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-s45dn" podUID="800f445b-95a6-4098-bf5f-ea91a7ead3d0" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 06 21:33:36 crc kubenswrapper[5014]: I1006 21:33:36.782024 5014 patch_prober.go:28] interesting pod/console-f9d7485db-kb2p5 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.21:8443/health\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Oct 06 21:33:36 crc kubenswrapper[5014]: I1006 21:33:36.782105 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-kb2p5" podUID="43df857e-f7f9-45e8-97e7-21adc3167678" containerName="console" probeResult="failure" output="Get \"https://10.217.0.21:8443/health\": dial tcp 10.217.0.21:8443: connect: connection refused" Oct 06 21:33:37 crc kubenswrapper[5014]: I1006 21:33:37.340677 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-s45dn" Oct 06 21:33:37 crc kubenswrapper[5014]: I1006 21:33:37.351082 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-s45dn" Oct 06 21:33:37 crc kubenswrapper[5014]: I1006 21:33:37.386448 5014 patch_prober.go:28] interesting pod/downloads-7954f5f757-zh4s7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Oct 06 21:33:37 crc kubenswrapper[5014]: I1006 21:33:37.386494 5014 prober.go:107] "Probe 
failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-zh4s7" podUID="2bb77525-303a-4691-81d2-0bbeb6eeed9c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Oct 06 21:33:37 crc kubenswrapper[5014]: I1006 21:33:37.387454 5014 patch_prober.go:28] interesting pod/downloads-7954f5f757-zh4s7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Oct 06 21:33:37 crc kubenswrapper[5014]: I1006 21:33:37.387478 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-zh4s7" podUID="2bb77525-303a-4691-81d2-0bbeb6eeed9c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Oct 06 21:33:38 crc kubenswrapper[5014]: I1006 21:33:38.320081 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs\") pod \"network-metrics-daemon-chcf6\" (UID: \"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\") " pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:33:38 crc kubenswrapper[5014]: I1006 21:33:38.330379 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e4dbffab-5f6a-4ba5-b0c3-68e7e8840621-metrics-certs\") pod \"network-metrics-daemon-chcf6\" (UID: \"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621\") " pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:33:38 crc kubenswrapper[5014]: I1006 21:33:38.625104 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-chcf6" Oct 06 21:33:45 crc kubenswrapper[5014]: I1006 21:33:45.875984 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-297xp" Oct 06 21:33:46 crc kubenswrapper[5014]: I1006 21:33:46.785127 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:46 crc kubenswrapper[5014]: I1006 21:33:46.788373 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:33:47 crc kubenswrapper[5014]: I1006 21:33:47.395883 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-zh4s7" Oct 06 21:33:50 crc kubenswrapper[5014]: I1006 21:33:50.285301 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 06 21:33:50 crc kubenswrapper[5014]: I1006 21:33:50.399719 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14817aec-b036-4806-9db0-4680b1bfadc1-kube-api-access\") pod \"14817aec-b036-4806-9db0-4680b1bfadc1\" (UID: \"14817aec-b036-4806-9db0-4680b1bfadc1\") " Oct 06 21:33:50 crc kubenswrapper[5014]: I1006 21:33:50.399792 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/14817aec-b036-4806-9db0-4680b1bfadc1-kubelet-dir\") pod \"14817aec-b036-4806-9db0-4680b1bfadc1\" (UID: \"14817aec-b036-4806-9db0-4680b1bfadc1\") " Oct 06 21:33:50 crc kubenswrapper[5014]: I1006 21:33:50.400272 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/14817aec-b036-4806-9db0-4680b1bfadc1-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "14817aec-b036-4806-9db0-4680b1bfadc1" (UID: "14817aec-b036-4806-9db0-4680b1bfadc1"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:33:50 crc kubenswrapper[5014]: I1006 21:33:50.410141 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14817aec-b036-4806-9db0-4680b1bfadc1-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "14817aec-b036-4806-9db0-4680b1bfadc1" (UID: "14817aec-b036-4806-9db0-4680b1bfadc1"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:33:50 crc kubenswrapper[5014]: I1006 21:33:50.501855 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14817aec-b036-4806-9db0-4680b1bfadc1-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 06 21:33:50 crc kubenswrapper[5014]: I1006 21:33:50.501909 5014 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/14817aec-b036-4806-9db0-4680b1bfadc1-kubelet-dir\") on node \"crc\" DevicePath \"\"" Oct 06 21:33:51 crc kubenswrapper[5014]: I1006 21:33:51.192703 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 06 21:33:51 crc kubenswrapper[5014]: I1006 21:33:51.192691 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"14817aec-b036-4806-9db0-4680b1bfadc1","Type":"ContainerDied","Data":"00bd2beb0ede7aeafc39890e77c388206b1de4c1f6416df716b9f5bbc569234f"} Oct 06 21:33:51 crc kubenswrapper[5014]: I1006 21:33:51.192835 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00bd2beb0ede7aeafc39890e77c388206b1de4c1f6416df716b9f5bbc569234f" Oct 06 21:33:51 crc kubenswrapper[5014]: I1006 21:33:51.735039 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 21:33:51 crc kubenswrapper[5014]: I1006 21:33:51.735136 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 21:33:56 crc kubenswrapper[5014]: I1006 21:33:56.620972 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-cl7xx" Oct 06 21:33:58 crc kubenswrapper[5014]: E1006 21:33:58.674751 5014 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Oct 06 21:33:58 crc kubenswrapper[5014]: E1006 21:33:58.675302 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jfd5h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start 
failed in pod community-operators-cfsf7_openshift-marketplace(e3393075-b0ae-43e5-9456-039b11d12c43): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 06 21:33:58 crc kubenswrapper[5014]: E1006 21:33:58.676513 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-cfsf7" podUID="e3393075-b0ae-43e5-9456-039b11d12c43"
Oct 06 21:34:01 crc kubenswrapper[5014]: E1006 21:34:01.481855 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-cfsf7" podUID="e3393075-b0ae-43e5-9456-039b11d12c43"
Oct 06 21:34:01 crc kubenswrapper[5014]: E1006 21:34:01.560039 5014 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Oct 06 21:34:01 crc kubenswrapper[5014]: E1006 21:34:01.560311 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mcfk5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-7xdvs_openshift-marketplace(49f3778d-0532-4fa4-9635-500e98f8b8e9): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 06 21:34:01 crc kubenswrapper[5014]: E1006 21:34:01.561551 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-7xdvs" podUID="49f3778d-0532-4fa4-9635-500e98f8b8e9"
Oct 06 21:34:01 crc kubenswrapper[5014]: E1006 21:34:01.576791 5014 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Oct 06 21:34:01 crc kubenswrapper[5014]: E1006 21:34:01.576912 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5wzpg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-cgv2z_openshift-marketplace(9557159b-e76f-4958-8d67-87c9da20b9ac): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 06 21:34:01 crc kubenswrapper[5014]: E1006 21:34:01.578956 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-cgv2z" podUID="9557159b-e76f-4958-8d67-87c9da20b9ac"
Oct 06 21:34:02 crc kubenswrapper[5014]: I1006 21:34:02.749812 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 06 21:34:03 crc kubenswrapper[5014]: E1006 21:34:03.031978 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-7xdvs" podUID="49f3778d-0532-4fa4-9635-500e98f8b8e9"
Oct 06 21:34:03 crc kubenswrapper[5014]: E1006 21:34:03.032325 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-cgv2z" podUID="9557159b-e76f-4958-8d67-87c9da20b9ac"
Oct 06 21:34:03 crc kubenswrapper[5014]: E1006 21:34:03.136020 5014 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Oct 06 21:34:03 crc kubenswrapper[5014]: E1006 21:34:03.136264 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lllsh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-kfzpv_openshift-marketplace(942343e5-31c5-44bf-accb-42c83a176d0c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 06 21:34:03 crc kubenswrapper[5014]: E1006 21:34:03.137690 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-kfzpv" podUID="942343e5-31c5-44bf-accb-42c83a176d0c"
Oct 06 21:34:03 crc kubenswrapper[5014]: E1006 21:34:03.341729 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-kfzpv" podUID="942343e5-31c5-44bf-accb-42c83a176d0c"
Oct 06 21:34:03 crc kubenswrapper[5014]: E1006 21:34:03.432377 5014 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Oct 06 21:34:03 crc kubenswrapper[5014]: E1006 21:34:03.433517 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nhjfp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-ftjbk_openshift-marketplace(c68ce8d8-f494-4971-8068-4fddf55fae97): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 06 21:34:03 crc kubenswrapper[5014]: E1006 21:34:03.434787 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-ftjbk" podUID="c68ce8d8-f494-4971-8068-4fddf55fae97"
Oct 06 21:34:03 crc kubenswrapper[5014]: E1006 21:34:03.438730 5014 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Oct 06 21:34:03 crc kubenswrapper[5014]: E1006 21:34:03.438919 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-246cw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-ln2v9_openshift-marketplace(ef4fc2ec-e7f2-490d-95bb-8f13a71153ef): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 06 21:34:03 crc kubenswrapper[5014]: E1006 21:34:03.440109 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-ln2v9" podUID="ef4fc2ec-e7f2-490d-95bb-8f13a71153ef"
Oct 06 21:34:05 crc kubenswrapper[5014]: E1006 21:34:05.705075 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-ln2v9" podUID="ef4fc2ec-e7f2-490d-95bb-8f13a71153ef"
Oct 06 21:34:05 crc kubenswrapper[5014]: E1006 21:34:05.705664 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-ftjbk" podUID="c68ce8d8-f494-4971-8068-4fddf55fae97"
Oct 06 21:34:06 crc kubenswrapper[5014]: E1006 21:34:06.608345 5014 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Oct 06 21:34:06 crc kubenswrapper[5014]: E1006 21:34:06.609141 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l6n2g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-5wc2h_openshift-marketplace(9af08180-3fb3-439d-b8cb-7b65f03c0413): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 06 21:34:06 crc kubenswrapper[5014]: E1006 21:34:06.610652 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-5wc2h" podUID="9af08180-3fb3-439d-b8cb-7b65f03c0413"
Oct 06 21:34:06 crc kubenswrapper[5014]: E1006 21:34:06.651087 5014 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Oct 06 21:34:06 crc kubenswrapper[5014]: E1006 21:34:06.651293 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ftprd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-l926w_openshift-marketplace(a5ad6e3f-cae5-465c-9744-abc1f63afd99): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 06 21:34:06 crc kubenswrapper[5014]: E1006 21:34:06.653796 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-l926w" podUID="a5ad6e3f-cae5-465c-9744-abc1f63afd99"
Oct 06 21:34:06 crc kubenswrapper[5014]: I1006 21:34:06.758902 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-chcf6"]
Oct 06 21:34:06 crc kubenswrapper[5014]: W1006 21:34:06.772611 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode4dbffab_5f6a_4ba5_b0c3_68e7e8840621.slice/crio-47bb402ef6b170664f283a9f63dc1dfe98a749463fc3b5bcfdfc218ae7b4d1bd WatchSource:0}: Error finding container 47bb402ef6b170664f283a9f63dc1dfe98a749463fc3b5bcfdfc218ae7b4d1bd: Status 404 returned error can't find the container with id 47bb402ef6b170664f283a9f63dc1dfe98a749463fc3b5bcfdfc218ae7b4d1bd
Oct 06 21:34:07 crc kubenswrapper[5014]: I1006 21:34:07.299749 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-chcf6" event={"ID":"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621","Type":"ContainerStarted","Data":"c787a8470fb67c85554946d0ce6421ea074cde641473b83e9615f78172fd3dde"}
Oct 06 21:34:07 crc kubenswrapper[5014]: I1006 21:34:07.300309 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-chcf6" event={"ID":"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621","Type":"ContainerStarted","Data":"47bb402ef6b170664f283a9f63dc1dfe98a749463fc3b5bcfdfc218ae7b4d1bd"}
Oct 06 21:34:07 crc kubenswrapper[5014]: E1006 21:34:07.304522 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-l926w" podUID="a5ad6e3f-cae5-465c-9744-abc1f63afd99"
Oct 06 21:34:07 crc kubenswrapper[5014]: E1006 21:34:07.305049 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-5wc2h" podUID="9af08180-3fb3-439d-b8cb-7b65f03c0413"
Oct 06 21:34:08 crc kubenswrapper[5014]: I1006 21:34:08.311012 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-chcf6" event={"ID":"e4dbffab-5f6a-4ba5-b0c3-68e7e8840621","Type":"ContainerStarted","Data":"11cbe2f903b4bd4c7f105218647fe03e84a78321667c7a542101bbb015a26e12"}
Oct 06 21:34:08 crc kubenswrapper[5014]: I1006 21:34:08.338926 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-chcf6" podStartSLOduration=173.338904473 podStartE2EDuration="2m53.338904473s" podCreationTimestamp="2025-10-06 21:31:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:34:08.336584424 +0000 UTC m=+193.629621198" watchObservedRunningTime="2025-10-06 21:34:08.338904473 +0000 UTC m=+193.631941247"
Oct 06 21:34:18 crc kubenswrapper[5014]: I1006 21:34:18.386589 5014 generic.go:334] "Generic (PLEG): container finished" podID="942343e5-31c5-44bf-accb-42c83a176d0c" containerID="f698fa53ea42102072b37c8455fa773f11c83f0dd0835fbe1f7fec5bee919b0b" exitCode=0
Oct 06 21:34:18 crc kubenswrapper[5014]: I1006 21:34:18.386783 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kfzpv" event={"ID":"942343e5-31c5-44bf-accb-42c83a176d0c","Type":"ContainerDied","Data":"f698fa53ea42102072b37c8455fa773f11c83f0dd0835fbe1f7fec5bee919b0b"}
Oct 06 21:34:18 crc kubenswrapper[5014]: I1006 21:34:18.391483 5014 generic.go:334] "Generic (PLEG): container finished" podID="e3393075-b0ae-43e5-9456-039b11d12c43" containerID="d1e2ac5758f12de67f268a5719900993bd3cd8fab9b80965b8c4208f85da49bb" exitCode=0
Oct 06 21:34:18 crc kubenswrapper[5014]: I1006 21:34:18.391559 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfsf7" event={"ID":"e3393075-b0ae-43e5-9456-039b11d12c43","Type":"ContainerDied","Data":"d1e2ac5758f12de67f268a5719900993bd3cd8fab9b80965b8c4208f85da49bb"}
Oct 06 21:34:18 crc kubenswrapper[5014]: I1006 21:34:18.396413 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cgv2z" event={"ID":"9557159b-e76f-4958-8d67-87c9da20b9ac","Type":"ContainerStarted","Data":"8bbb43e1b970a26b3c19e2fd6769b8cfbb194193b5f9f7475e6eff5ae5d3f1bd"}
Oct 06 21:34:19 crc kubenswrapper[5014]: I1006 21:34:19.407146 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kfzpv" event={"ID":"942343e5-31c5-44bf-accb-42c83a176d0c","Type":"ContainerStarted","Data":"d688167b0074cedb82cec6ffa60377d0ae7cd15148d43b6d4704e490ce46b0a5"}
Oct 06 21:34:19 crc kubenswrapper[5014]: I1006 21:34:19.409877 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfsf7" event={"ID":"e3393075-b0ae-43e5-9456-039b11d12c43","Type":"ContainerStarted","Data":"69d6b60bbdb19e143d1baea28833481da21c7c86f07cbd3e14384bb057794995"}
Oct 06 21:34:19 crc kubenswrapper[5014]: I1006 21:34:19.412458 5014 generic.go:334] "Generic (PLEG): container finished" podID="49f3778d-0532-4fa4-9635-500e98f8b8e9" containerID="4a540b6a508273376272ccaf37a9e89f68530dced75cb240d5a71dda8c78ac9a" exitCode=0
Oct 06 21:34:19 crc kubenswrapper[5014]: I1006 21:34:19.412497 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xdvs" event={"ID":"49f3778d-0532-4fa4-9635-500e98f8b8e9","Type":"ContainerDied","Data":"4a540b6a508273376272ccaf37a9e89f68530dced75cb240d5a71dda8c78ac9a"}
Oct 06 21:34:19 crc kubenswrapper[5014]: I1006 21:34:19.414799 5014 generic.go:334] "Generic (PLEG): container finished" podID="9557159b-e76f-4958-8d67-87c9da20b9ac" containerID="8bbb43e1b970a26b3c19e2fd6769b8cfbb194193b5f9f7475e6eff5ae5d3f1bd" exitCode=0
Oct 06 21:34:19 crc kubenswrapper[5014]: I1006 21:34:19.414822 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cgv2z" event={"ID":"9557159b-e76f-4958-8d67-87c9da20b9ac","Type":"ContainerDied","Data":"8bbb43e1b970a26b3c19e2fd6769b8cfbb194193b5f9f7475e6eff5ae5d3f1bd"}
Oct 06 21:34:19 crc kubenswrapper[5014]: I1006 21:34:19.434665 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kfzpv" podStartSLOduration=3.161749615 podStartE2EDuration="56.434637662s" podCreationTimestamp="2025-10-06 21:33:23 +0000 UTC" firstStartedPulling="2025-10-06 21:33:25.696794462 +0000 UTC m=+150.989831196" lastFinishedPulling="2025-10-06 21:34:18.969682509 +0000 UTC m=+204.262719243" observedRunningTime="2025-10-06 21:34:19.428841094 +0000 UTC m=+204.721877868" watchObservedRunningTime="2025-10-06 21:34:19.434637662 +0000 UTC m=+204.727674406"
Oct 06 21:34:19 crc kubenswrapper[5014]: I1006 21:34:19.454261 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cfsf7" podStartSLOduration=2.320867483 podStartE2EDuration="55.454237051s" podCreationTimestamp="2025-10-06 21:33:24 +0000 UTC" firstStartedPulling="2025-10-06 21:33:25.703578961 +0000 UTC m=+150.996615695" lastFinishedPulling="2025-10-06 21:34:18.836948529 +0000 UTC m=+204.129985263" observedRunningTime="2025-10-06 21:34:19.447401847 +0000 UTC m=+204.740438601" watchObservedRunningTime="2025-10-06 21:34:19.454237051 +0000 UTC m=+204.747273805"
Oct 06 21:34:20 crc kubenswrapper[5014]: I1006 21:34:20.421413 5014 generic.go:334] "Generic (PLEG): container finished" podID="c68ce8d8-f494-4971-8068-4fddf55fae97" containerID="57cc09db6f57f1fbe7a95787820e28c7ec5b12c623cc1f086b313e2f9bea10dd" exitCode=0
Oct 06 21:34:20 crc kubenswrapper[5014]: I1006 21:34:20.421488 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ftjbk" event={"ID":"c68ce8d8-f494-4971-8068-4fddf55fae97","Type":"ContainerDied","Data":"57cc09db6f57f1fbe7a95787820e28c7ec5b12c623cc1f086b313e2f9bea10dd"}
Oct 06 21:34:20 crc kubenswrapper[5014]: I1006 21:34:20.427817 5014 generic.go:334] "Generic (PLEG): container finished" podID="9af08180-3fb3-439d-b8cb-7b65f03c0413" containerID="473f7a9bbd9e40f5c008991fc3fd47765efe5620fe7a7fa7e2585a53f09e5c82" exitCode=0
Oct 06 21:34:20 crc kubenswrapper[5014]: I1006 21:34:20.427859 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5wc2h" event={"ID":"9af08180-3fb3-439d-b8cb-7b65f03c0413","Type":"ContainerDied","Data":"473f7a9bbd9e40f5c008991fc3fd47765efe5620fe7a7fa7e2585a53f09e5c82"}
Oct 06 21:34:20 crc kubenswrapper[5014]: I1006 21:34:20.429793 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xdvs" event={"ID":"49f3778d-0532-4fa4-9635-500e98f8b8e9","Type":"ContainerStarted","Data":"70775cf30ef2f6346eaa2545f8cccb0700f0bee3e9d5ecdd6b5659d90db1d5ed"}
Oct 06 21:34:20 crc kubenswrapper[5014]: I1006 21:34:20.437011 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cgv2z" event={"ID":"9557159b-e76f-4958-8d67-87c9da20b9ac","Type":"ContainerStarted","Data":"4e6a1f394df674a3470ba240256601cb5d31a3c880a8aafb4965dc5b621192d1"}
Oct 06 21:34:20 crc kubenswrapper[5014]: I1006 21:34:20.498088 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cgv2z" podStartSLOduration=3.628168738 podStartE2EDuration="54.498072603s" podCreationTimestamp="2025-10-06 21:33:26 +0000 UTC" firstStartedPulling="2025-10-06 21:33:28.980888612 +0000 UTC m=+154.273925346" lastFinishedPulling="2025-10-06 21:34:19.850792477 +0000 UTC m=+205.143829211" observedRunningTime="2025-10-06 21:34:20.497027698 +0000 UTC m=+205.790064432" watchObservedRunningTime="2025-10-06 21:34:20.498072603 +0000 UTC m=+205.791109337"
Oct 06 21:34:20 crc kubenswrapper[5014]: I1006 21:34:20.515476 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7xdvs" podStartSLOduration=2.559903933 podStartE2EDuration="53.515455766s" podCreationTimestamp="2025-10-06 21:33:27 +0000 UTC" firstStartedPulling="2025-10-06 21:33:28.906121235 +0000 UTC m=+154.199157969" lastFinishedPulling="2025-10-06 21:34:19.861673068 +0000 UTC m=+205.154709802" observedRunningTime="2025-10-06 21:34:20.513237281 +0000 UTC m=+205.806274015" watchObservedRunningTime="2025-10-06 21:34:20.515455766 +0000 UTC m=+205.808492500"
Oct 06 21:34:21 crc kubenswrapper[5014]: I1006 21:34:21.445596 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ftjbk" event={"ID":"c68ce8d8-f494-4971-8068-4fddf55fae97","Type":"ContainerStarted","Data":"6bbb3f45accc86e90047da58c3f5449de81dac9aa42cc01c120fc772d62b68c3"}
Oct 06 21:34:21 crc kubenswrapper[5014]: I1006 21:34:21.449353 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5wc2h" event={"ID":"9af08180-3fb3-439d-b8cb-7b65f03c0413","Type":"ContainerStarted","Data":"364d3ab5e8857490dab47c7d8e9d567e7934c7e7aca7557daf152e4160248c53"}
Oct 06 21:34:21 crc kubenswrapper[5014]: I1006 21:34:21.472019 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ftjbk" podStartSLOduration=2.253378161 podStartE2EDuration="57.472004949s" podCreationTimestamp="2025-10-06 21:33:24 +0000 UTC" firstStartedPulling="2025-10-06 21:33:25.758077534 +0000 UTC m=+151.051114268" lastFinishedPulling="2025-10-06 21:34:20.976704322 +0000 UTC m=+206.269741056" observedRunningTime="2025-10-06 21:34:21.469695351 +0000 UTC m=+206.762732085" watchObservedRunningTime="2025-10-06 21:34:21.472004949 +0000 UTC m=+206.765041683"
Oct 06 21:34:21 crc kubenswrapper[5014]: I1006 21:34:21.496199 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5wc2h" podStartSLOduration=3.207325201 podStartE2EDuration="56.496183855s" podCreationTimestamp="2025-10-06 21:33:25 +0000 UTC" firstStartedPulling="2025-10-06 21:33:27.856295368 +0000 UTC m=+153.149332102" lastFinishedPulling="2025-10-06 21:34:21.145154022 +0000 UTC m=+206.438190756" observedRunningTime="2025-10-06 21:34:21.491223755 +0000 UTC m=+206.784260489" watchObservedRunningTime="2025-10-06 21:34:21.496183855 +0000 UTC m=+206.789220589"
Oct 06 21:34:21 crc kubenswrapper[5014]: I1006 21:34:21.735798 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 21:34:21 crc kubenswrapper[5014]: I1006 21:34:21.736180 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 21:34:21 crc kubenswrapper[5014]: I1006 21:34:21.736241 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths"
Oct 06 21:34:21 crc kubenswrapper[5014]: I1006 21:34:21.737079 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 06 21:34:21 crc kubenswrapper[5014]: I1006 21:34:21.737259 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c" gracePeriod=600
Oct 06 21:34:22 crc kubenswrapper[5014]: I1006 21:34:22.466530 5014 generic.go:334] "Generic (PLEG): container finished" podID="a5ad6e3f-cae5-465c-9744-abc1f63afd99" containerID="e38f2d25a89bd48e669bdf799a7b99814db8f85cedfd8342bb026311249eff37" exitCode=0
Oct 06 21:34:22 crc kubenswrapper[5014]: I1006 21:34:22.466587 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l926w" event={"ID":"a5ad6e3f-cae5-465c-9744-abc1f63afd99","Type":"ContainerDied","Data":"e38f2d25a89bd48e669bdf799a7b99814db8f85cedfd8342bb026311249eff37"}
Oct 06 21:34:22 crc kubenswrapper[5014]: I1006 21:34:22.470732 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c" exitCode=0
Oct 06 21:34:22 crc kubenswrapper[5014]: I1006 21:34:22.470868 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c"}
Oct 06 21:34:22 crc kubenswrapper[5014]: I1006 21:34:22.470973 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"4ae7b585fe6c43eb13afc90190430d2c1a49289fda17e2790f4be6b28606bf93"}
Oct 06 21:34:23 crc kubenswrapper[5014]: I1006 21:34:23.478630 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l926w" event={"ID":"a5ad6e3f-cae5-465c-9744-abc1f63afd99","Type":"ContainerStarted","Data":"f3eba5e53648902945e408a699e57b814074fbd663884a3c47eae5160a9703a4"}
Oct 06 21:34:23 crc kubenswrapper[5014]: I1006 21:34:23.481947 5014 generic.go:334] "Generic (PLEG): container finished" podID="ef4fc2ec-e7f2-490d-95bb-8f13a71153ef" containerID="5d23eda04f5b29fcdbc5860230905a1352d0f99accbe46fa1a56dafca3f1f9dc" exitCode=0
Oct 06 21:34:23 crc kubenswrapper[5014]: I1006 21:34:23.482107 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ln2v9" event={"ID":"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef","Type":"ContainerDied","Data":"5d23eda04f5b29fcdbc5860230905a1352d0f99accbe46fa1a56dafca3f1f9dc"}
Oct 06 21:34:23 crc kubenswrapper[5014]: I1006 21:34:23.502537 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-l926w" podStartSLOduration=3.384091533 podStartE2EDuration="57.502516493s" podCreationTimestamp="2025-10-06 21:33:26 +0000 UTC" firstStartedPulling="2025-10-06 21:33:28.959051434 +0000 UTC m=+154.252088168" lastFinishedPulling="2025-10-06 21:34:23.077476394 +0000 UTC m=+208.370513128" observedRunningTime="2025-10-06 21:34:23.501178367 +0000 UTC m=+208.794215131" watchObservedRunningTime="2025-10-06 21:34:23.502516493 +0000 UTC m=+208.795553227"
Oct 06 21:34:23 crc kubenswrapper[5014]: I1006 21:34:23.898990 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kfzpv"
Oct 06 21:34:23 crc kubenswrapper[5014]: I1006 21:34:23.899875 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kfzpv"
Oct 06 21:34:24 crc kubenswrapper[5014]: I1006 21:34:24.502449 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ftjbk"
Oct 06 21:34:24 crc kubenswrapper[5014]: I1006 21:34:24.502492 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ftjbk"
Oct 06 21:34:24 crc kubenswrapper[5014]: I1006 21:34:24.606229 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kfzpv"
Oct 06 21:34:24 crc kubenswrapper[5014]: I1006 21:34:24.610796 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ftjbk"
Oct 06 21:34:24 crc kubenswrapper[5014]: I1006 21:34:24.726152 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cfsf7"
Oct 06 21:34:24 crc kubenswrapper[5014]: I1006 21:34:24.726228 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cfsf7"
Oct 06 21:34:24 crc kubenswrapper[5014]: I1006 21:34:24.770004 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cfsf7"
Oct 06 21:34:25 crc kubenswrapper[5014]: I1006 21:34:25.535831 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cfsf7"
Oct 06 21:34:25 crc kubenswrapper[5014]: I1006 21:34:25.536056 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kfzpv"
Oct 06 21:34:26 crc kubenswrapper[5014]: I1006 21:34:26.312557 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5wc2h"
Oct 06 21:34:26 crc kubenswrapper[5014]: I1006 21:34:26.312862 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5wc2h"
Oct 06 21:34:26 crc kubenswrapper[5014]: I1006 21:34:26.356122 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5wc2h"
Oct 06 21:34:26 crc kubenswrapper[5014]: I1006 21:34:26.509178 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ln2v9" event={"ID":"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef","Type":"ContainerStarted","Data":"783de64af90a589ff00286cd497076601c3de0c61b4ef4daa050687d7fd6a7b7"}
Oct 06 21:34:26 crc kubenswrapper[5014]: I1006 21:34:26.530917 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ln2v9" podStartSLOduration=3.17401451 podStartE2EDuration="1m3.53089776s" podCreationTimestamp="2025-10-06 21:33:23 +0000 UTC" firstStartedPulling="2025-10-06 21:33:25.733801693 +0000 UTC m=+151.026838427" lastFinishedPulling="2025-10-06 21:34:26.090684943 +0000 UTC m=+211.383721677" observedRunningTime="2025-10-06 21:34:26.52974273 +0000 UTC m=+211.822779474" watchObservedRunningTime="2025-10-06 21:34:26.53089776 +0000 UTC m=+211.823934494"
Oct 06 21:34:26 crc kubenswrapper[5014]: I1006 21:34:26.552837 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5wc2h"
Oct 06 21:34:26 crc kubenswrapper[5014]: I1006 21:34:26.718818 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-l926w"
Oct 06 21:34:26 crc kubenswrapper[5014]: I1006 21:34:26.719300 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-l926w"
Oct 06 21:34:26 crc kubenswrapper[5014]: I1006 21:34:26.808170 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-l926w"
Oct 06 21:34:27 crc kubenswrapper[5014]: I1006 21:34:27.315894 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cgv2z"
Oct 06 21:34:27 crc kubenswrapper[5014]: I1006 21:34:27.315942 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cgv2z"
Oct 06 21:34:27 crc kubenswrapper[5014]: I1006 21:34:27.358409 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cgv2z"
Oct 06 21:34:27 crc kubenswrapper[5014]: I1006 21:34:27.559393 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cgv2z"
Oct 06 21:34:27 crc kubenswrapper[5014]: I1006 21:34:27.715956 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7xdvs"
Oct 06 21:34:27 crc kubenswrapper[5014]: I1006 21:34:27.716013 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7xdvs"
Oct 06 21:34:27 crc kubenswrapper[5014]: I1006 21:34:27.754798 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7xdvs"
Oct 06 21:34:28 crc kubenswrapper[5014]: I1006 21:34:28.555844 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7xdvs"
Oct 06 21:34:28 crc kubenswrapper[5014]: I1006 21:34:28.568263 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-l926w"
Oct 06 21:34:28 crc kubenswrapper[5014]: I1006 21:34:28.720151 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cfsf7"]
Oct 06 21:34:28 crc kubenswrapper[5014]: I1006 21:34:28.720394 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cfsf7" podUID="e3393075-b0ae-43e5-9456-039b11d12c43" containerName="registry-server" containerID="cri-o://69d6b60bbdb19e143d1baea28833481da21c7c86f07cbd3e14384bb057794995" gracePeriod=2
Oct 06 21:34:30 crc kubenswrapper[5014]: I1006 21:34:30.921773 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l926w"]
Oct 06 21:34:30 crc kubenswrapper[5014]: I1006 21:34:30.922429 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-l926w" podUID="a5ad6e3f-cae5-465c-9744-abc1f63afd99" containerName="registry-server" containerID="cri-o://f3eba5e53648902945e408a699e57b814074fbd663884a3c47eae5160a9703a4" gracePeriod=2
Oct 06 21:34:31 crc kubenswrapper[5014]: I1006 21:34:31.123065 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7xdvs"]
Oct 06 21:34:31 crc kubenswrapper[5014]: I1006 21:34:31.123311 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7xdvs" podUID="49f3778d-0532-4fa4-9635-500e98f8b8e9" containerName="registry-server" containerID="cri-o://70775cf30ef2f6346eaa2545f8cccb0700f0bee3e9d5ecdd6b5659d90db1d5ed" gracePeriod=2
Oct 06 21:34:31 crc kubenswrapper[5014]: I1006 21:34:31.535301 5014 generic.go:334] "Generic (PLEG): container finished" podID="e3393075-b0ae-43e5-9456-039b11d12c43" containerID="69d6b60bbdb19e143d1baea28833481da21c7c86f07cbd3e14384bb057794995" exitCode=0
Oct 06 21:34:31 crc kubenswrapper[5014]: I1006 21:34:31.535378 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfsf7" event={"ID":"e3393075-b0ae-43e5-9456-039b11d12c43","Type":"ContainerDied","Data":"69d6b60bbdb19e143d1baea28833481da21c7c86f07cbd3e14384bb057794995"}
Oct 06 21:34:31 crc kubenswrapper[5014]: I1006 21:34:31.811044 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cfsf7"
Oct 06 21:34:31 crc kubenswrapper[5014]: I1006 21:34:31.888595 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3393075-b0ae-43e5-9456-039b11d12c43-utilities\") pod \"e3393075-b0ae-43e5-9456-039b11d12c43\" (UID: \"e3393075-b0ae-43e5-9456-039b11d12c43\") "
Oct 06 21:34:31 crc kubenswrapper[5014]: I1006 21:34:31.888722 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfd5h\" (UniqueName: \"kubernetes.io/projected/e3393075-b0ae-43e5-9456-039b11d12c43-kube-api-access-jfd5h\") pod \"e3393075-b0ae-43e5-9456-039b11d12c43\" (UID: \"e3393075-b0ae-43e5-9456-039b11d12c43\") "
Oct 06 21:34:31 crc kubenswrapper[5014]: I1006 21:34:31.888774 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3393075-b0ae-43e5-9456-039b11d12c43-catalog-content\") pod \"e3393075-b0ae-43e5-9456-039b11d12c43\" (UID: \"e3393075-b0ae-43e5-9456-039b11d12c43\") "
Oct 06 21:34:31 crc kubenswrapper[5014]: I1006 21:34:31.889341 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3393075-b0ae-43e5-9456-039b11d12c43-utilities" (OuterVolumeSpecName: "utilities") pod "e3393075-b0ae-43e5-9456-039b11d12c43" (UID: "e3393075-b0ae-43e5-9456-039b11d12c43"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:34:31 crc kubenswrapper[5014]: I1006 21:34:31.909045 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3393075-b0ae-43e5-9456-039b11d12c43-kube-api-access-jfd5h" (OuterVolumeSpecName: "kube-api-access-jfd5h") pod "e3393075-b0ae-43e5-9456-039b11d12c43" (UID: "e3393075-b0ae-43e5-9456-039b11d12c43"). InnerVolumeSpecName "kube-api-access-jfd5h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:34:31 crc kubenswrapper[5014]: I1006 21:34:31.965330 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3393075-b0ae-43e5-9456-039b11d12c43-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e3393075-b0ae-43e5-9456-039b11d12c43" (UID: "e3393075-b0ae-43e5-9456-039b11d12c43"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:34:31 crc kubenswrapper[5014]: I1006 21:34:31.990821 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3393075-b0ae-43e5-9456-039b11d12c43-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 21:34:31 crc kubenswrapper[5014]: I1006 21:34:31.991352 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfd5h\" (UniqueName: \"kubernetes.io/projected/e3393075-b0ae-43e5-9456-039b11d12c43-kube-api-access-jfd5h\") on node \"crc\" DevicePath \"\""
Oct 06 21:34:31 crc kubenswrapper[5014]: I1006 21:34:31.991368 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3393075-b0ae-43e5-9456-039b11d12c43-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 21:34:32 crc kubenswrapper[5014]: I1006 21:34:32.542480 5014 generic.go:334] "Generic (PLEG): container finished" podID="a5ad6e3f-cae5-465c-9744-abc1f63afd99" containerID="f3eba5e53648902945e408a699e57b814074fbd663884a3c47eae5160a9703a4" exitCode=0
Oct 06 21:34:32 crc kubenswrapper[5014]: I1006 21:34:32.542545 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l926w" event={"ID":"a5ad6e3f-cae5-465c-9744-abc1f63afd99","Type":"ContainerDied","Data":"f3eba5e53648902945e408a699e57b814074fbd663884a3c47eae5160a9703a4"}
Oct 06 21:34:32 crc kubenswrapper[5014]: I1006 21:34:32.544749 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfsf7" event={"ID":"e3393075-b0ae-43e5-9456-039b11d12c43","Type":"ContainerDied","Data":"6f2fdfb4d1c5da6bbdfb37d149c9130e02065fe4bdc035f68b9266fb6facd449"}
Oct 06 21:34:32 crc kubenswrapper[5014]: I1006 21:34:32.544831 5014 scope.go:117] "RemoveContainer" containerID="69d6b60bbdb19e143d1baea28833481da21c7c86f07cbd3e14384bb057794995"
Oct 06 21:34:32 crc kubenswrapper[5014]: I1006 21:34:32.544895 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cfsf7"
Oct 06 21:34:32 crc kubenswrapper[5014]: I1006 21:34:32.561044 5014 scope.go:117] "RemoveContainer" containerID="d1e2ac5758f12de67f268a5719900993bd3cd8fab9b80965b8c4208f85da49bb"
Oct 06 21:34:32 crc kubenswrapper[5014]: I1006 21:34:32.573462 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cfsf7"]
Oct 06 21:34:32 crc kubenswrapper[5014]: I1006 21:34:32.575604 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cfsf7"]
Oct 06 21:34:32 crc kubenswrapper[5014]: I1006 21:34:32.597174 5014 scope.go:117] "RemoveContainer" containerID="bdc0ca8bbb7b8cf496dd861769d192cb8c93411b3b075ab203fa37156c2eed18"
Oct 06 21:34:32 crc kubenswrapper[5014]: I1006 21:34:32.992243 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l926w"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.107072 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5ad6e3f-cae5-465c-9744-abc1f63afd99-catalog-content\") pod \"a5ad6e3f-cae5-465c-9744-abc1f63afd99\" (UID: \"a5ad6e3f-cae5-465c-9744-abc1f63afd99\") "
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.107140 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftprd\" (UniqueName: \"kubernetes.io/projected/a5ad6e3f-cae5-465c-9744-abc1f63afd99-kube-api-access-ftprd\") pod \"a5ad6e3f-cae5-465c-9744-abc1f63afd99\" (UID: \"a5ad6e3f-cae5-465c-9744-abc1f63afd99\") "
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.107188 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5ad6e3f-cae5-465c-9744-abc1f63afd99-utilities\") pod \"a5ad6e3f-cae5-465c-9744-abc1f63afd99\" (UID: \"a5ad6e3f-cae5-465c-9744-abc1f63afd99\") "
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.108020 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5ad6e3f-cae5-465c-9744-abc1f63afd99-utilities" (OuterVolumeSpecName: "utilities") pod "a5ad6e3f-cae5-465c-9744-abc1f63afd99" (UID: "a5ad6e3f-cae5-465c-9744-abc1f63afd99"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.117830 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5ad6e3f-cae5-465c-9744-abc1f63afd99-kube-api-access-ftprd" (OuterVolumeSpecName: "kube-api-access-ftprd") pod "a5ad6e3f-cae5-465c-9744-abc1f63afd99" (UID: "a5ad6e3f-cae5-465c-9744-abc1f63afd99"). InnerVolumeSpecName "kube-api-access-ftprd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.121187 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5ad6e3f-cae5-465c-9744-abc1f63afd99-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a5ad6e3f-cae5-465c-9744-abc1f63afd99" (UID: "a5ad6e3f-cae5-465c-9744-abc1f63afd99"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.209075 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5ad6e3f-cae5-465c-9744-abc1f63afd99-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.209110 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftprd\" (UniqueName: \"kubernetes.io/projected/a5ad6e3f-cae5-465c-9744-abc1f63afd99-kube-api-access-ftprd\") on node \"crc\" DevicePath \"\""
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.209124 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5ad6e3f-cae5-465c-9744-abc1f63afd99-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.469966 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7xdvs"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.493568 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3393075-b0ae-43e5-9456-039b11d12c43" path="/var/lib/kubelet/pods/e3393075-b0ae-43e5-9456-039b11d12c43/volumes"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.553367 5014 generic.go:334] "Generic (PLEG): container finished" podID="49f3778d-0532-4fa4-9635-500e98f8b8e9" containerID="70775cf30ef2f6346eaa2545f8cccb0700f0bee3e9d5ecdd6b5659d90db1d5ed" exitCode=0
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.553441 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xdvs" event={"ID":"49f3778d-0532-4fa4-9635-500e98f8b8e9","Type":"ContainerDied","Data":"70775cf30ef2f6346eaa2545f8cccb0700f0bee3e9d5ecdd6b5659d90db1d5ed"}
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.553475 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xdvs" event={"ID":"49f3778d-0532-4fa4-9635-500e98f8b8e9","Type":"ContainerDied","Data":"47cb5a9dc0ab5fff97c848d718e3eb4006452b78c7702289891a26efd8bd3833"}
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.553495 5014 scope.go:117] "RemoveContainer" containerID="70775cf30ef2f6346eaa2545f8cccb0700f0bee3e9d5ecdd6b5659d90db1d5ed"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.553613 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7xdvs"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.556800 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l926w" event={"ID":"a5ad6e3f-cae5-465c-9744-abc1f63afd99","Type":"ContainerDied","Data":"35690bd7fc793d01fb1ddb4211cce4a7546cf5844409770bfa34b712c499c7ce"}
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.556851 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l926w"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.578575 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l926w"]
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.580840 5014 scope.go:117] "RemoveContainer" containerID="4a540b6a508273376272ccaf37a9e89f68530dced75cb240d5a71dda8c78ac9a"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.580879 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-l926w"]
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.608257 5014 scope.go:117] "RemoveContainer" containerID="43231e83e0e2ebc7a9c08b4a9414c0acf9199208da4ba5a53b2a672809e2d927"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.614183 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49f3778d-0532-4fa4-9635-500e98f8b8e9-catalog-content\") pod \"49f3778d-0532-4fa4-9635-500e98f8b8e9\" (UID: \"49f3778d-0532-4fa4-9635-500e98f8b8e9\") "
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.614247 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49f3778d-0532-4fa4-9635-500e98f8b8e9-utilities\") pod \"49f3778d-0532-4fa4-9635-500e98f8b8e9\" (UID: \"49f3778d-0532-4fa4-9635-500e98f8b8e9\") "
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.614358 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcfk5\" (UniqueName: \"kubernetes.io/projected/49f3778d-0532-4fa4-9635-500e98f8b8e9-kube-api-access-mcfk5\") pod \"49f3778d-0532-4fa4-9635-500e98f8b8e9\" (UID: \"49f3778d-0532-4fa4-9635-500e98f8b8e9\") "
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.616232 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49f3778d-0532-4fa4-9635-500e98f8b8e9-utilities" (OuterVolumeSpecName: "utilities") pod "49f3778d-0532-4fa4-9635-500e98f8b8e9" (UID: "49f3778d-0532-4fa4-9635-500e98f8b8e9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.621302 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49f3778d-0532-4fa4-9635-500e98f8b8e9-kube-api-access-mcfk5" (OuterVolumeSpecName: "kube-api-access-mcfk5") pod "49f3778d-0532-4fa4-9635-500e98f8b8e9" (UID: "49f3778d-0532-4fa4-9635-500e98f8b8e9"). InnerVolumeSpecName "kube-api-access-mcfk5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.624873 5014 scope.go:117] "RemoveContainer" containerID="70775cf30ef2f6346eaa2545f8cccb0700f0bee3e9d5ecdd6b5659d90db1d5ed"
Oct 06 21:34:33 crc kubenswrapper[5014]: E1006 21:34:33.625259 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70775cf30ef2f6346eaa2545f8cccb0700f0bee3e9d5ecdd6b5659d90db1d5ed\": container with ID starting with 70775cf30ef2f6346eaa2545f8cccb0700f0bee3e9d5ecdd6b5659d90db1d5ed not found: ID does not exist" containerID="70775cf30ef2f6346eaa2545f8cccb0700f0bee3e9d5ecdd6b5659d90db1d5ed"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.625290 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70775cf30ef2f6346eaa2545f8cccb0700f0bee3e9d5ecdd6b5659d90db1d5ed"} err="failed to get container status \"70775cf30ef2f6346eaa2545f8cccb0700f0bee3e9d5ecdd6b5659d90db1d5ed\": rpc error: code = NotFound desc = could not find container \"70775cf30ef2f6346eaa2545f8cccb0700f0bee3e9d5ecdd6b5659d90db1d5ed\": container with ID starting with 70775cf30ef2f6346eaa2545f8cccb0700f0bee3e9d5ecdd6b5659d90db1d5ed not found: ID does not exist"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.625314 5014 scope.go:117] "RemoveContainer" containerID="4a540b6a508273376272ccaf37a9e89f68530dced75cb240d5a71dda8c78ac9a"
Oct 06 21:34:33 crc kubenswrapper[5014]: E1006 21:34:33.625861 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a540b6a508273376272ccaf37a9e89f68530dced75cb240d5a71dda8c78ac9a\": container with ID starting with 4a540b6a508273376272ccaf37a9e89f68530dced75cb240d5a71dda8c78ac9a not found: ID does not exist" containerID="4a540b6a508273376272ccaf37a9e89f68530dced75cb240d5a71dda8c78ac9a"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.625883 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a540b6a508273376272ccaf37a9e89f68530dced75cb240d5a71dda8c78ac9a"} err="failed to get container status \"4a540b6a508273376272ccaf37a9e89f68530dced75cb240d5a71dda8c78ac9a\": rpc error: code = NotFound desc = could not find container \"4a540b6a508273376272ccaf37a9e89f68530dced75cb240d5a71dda8c78ac9a\": container with ID starting with 4a540b6a508273376272ccaf37a9e89f68530dced75cb240d5a71dda8c78ac9a not found: ID does not exist"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.625896 5014 scope.go:117] "RemoveContainer" containerID="43231e83e0e2ebc7a9c08b4a9414c0acf9199208da4ba5a53b2a672809e2d927"
Oct 06 21:34:33 crc kubenswrapper[5014]: E1006 21:34:33.626488 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43231e83e0e2ebc7a9c08b4a9414c0acf9199208da4ba5a53b2a672809e2d927\": container with ID starting with 43231e83e0e2ebc7a9c08b4a9414c0acf9199208da4ba5a53b2a672809e2d927 not found: ID does not exist" containerID="43231e83e0e2ebc7a9c08b4a9414c0acf9199208da4ba5a53b2a672809e2d927"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.626520 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43231e83e0e2ebc7a9c08b4a9414c0acf9199208da4ba5a53b2a672809e2d927"} err="failed to get container status \"43231e83e0e2ebc7a9c08b4a9414c0acf9199208da4ba5a53b2a672809e2d927\": rpc error: code = NotFound desc = could not find container \"43231e83e0e2ebc7a9c08b4a9414c0acf9199208da4ba5a53b2a672809e2d927\": container with ID starting with 43231e83e0e2ebc7a9c08b4a9414c0acf9199208da4ba5a53b2a672809e2d927 not found: ID does not exist"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.626547 5014 scope.go:117] "RemoveContainer" containerID="f3eba5e53648902945e408a699e57b814074fbd663884a3c47eae5160a9703a4"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.666827 5014 scope.go:117] "RemoveContainer" containerID="e38f2d25a89bd48e669bdf799a7b99814db8f85cedfd8342bb026311249eff37"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.691324 5014 scope.go:117] "RemoveContainer" containerID="fdd618b338fbe46dc1a36cde9f496e82fba0546601bba2997438785091fcb11e"
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.716055 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/49f3778d-0532-4fa4-9635-500e98f8b8e9-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.716184 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcfk5\" (UniqueName: \"kubernetes.io/projected/49f3778d-0532-4fa4-9635-500e98f8b8e9-kube-api-access-mcfk5\") on node \"crc\" DevicePath \"\""
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.778219 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49f3778d-0532-4fa4-9635-500e98f8b8e9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "49f3778d-0532-4fa4-9635-500e98f8b8e9" (UID: "49f3778d-0532-4fa4-9635-500e98f8b8e9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.816867 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/49f3778d-0532-4fa4-9635-500e98f8b8e9-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.896452 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7xdvs"]
Oct 06 21:34:33 crc kubenswrapper[5014]: I1006 21:34:33.899791 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7xdvs"]
Oct 06 21:34:34 crc kubenswrapper[5014]: I1006 21:34:34.307333 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ln2v9"
Oct 06 21:34:34 crc kubenswrapper[5014]: I1006 21:34:34.307487 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ln2v9"
Oct 06 21:34:34 crc kubenswrapper[5014]: I1006 21:34:34.347081 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ln2v9"
Oct 06 21:34:34 crc kubenswrapper[5014]: I1006 21:34:34.537511 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ftjbk"
Oct 06 21:34:34 crc kubenswrapper[5014]: I1006 21:34:34.617202 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ln2v9"
Oct 06 21:34:35 crc kubenswrapper[5014]: I1006 21:34:35.492352 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49f3778d-0532-4fa4-9635-500e98f8b8e9" path="/var/lib/kubelet/pods/49f3778d-0532-4fa4-9635-500e98f8b8e9/volumes"
Oct 06 21:34:35 crc kubenswrapper[5014]: I1006 21:34:35.493599 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5ad6e3f-cae5-465c-9744-abc1f63afd99" path="/var/lib/kubelet/pods/a5ad6e3f-cae5-465c-9744-abc1f63afd99/volumes"
Oct 06 21:34:37 crc kubenswrapper[5014]: I1006 21:34:37.526021 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ln2v9"]
Oct 06 21:34:37 crc kubenswrapper[5014]: I1006 21:34:37.582369 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ln2v9" podUID="ef4fc2ec-e7f2-490d-95bb-8f13a71153ef" containerName="registry-server" containerID="cri-o://783de64af90a589ff00286cd497076601c3de0c61b4ef4daa050687d7fd6a7b7" gracePeriod=2
Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.355940 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ln2v9"
Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.484369 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-246cw\" (UniqueName: \"kubernetes.io/projected/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-kube-api-access-246cw\") pod \"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef\" (UID: \"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef\") "
Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.484459 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-utilities\") pod \"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef\" (UID: \"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef\") "
Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.484514 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-catalog-content\") pod \"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef\" (UID: \"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef\") "
Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.485945 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-utilities" (OuterVolumeSpecName: "utilities") pod "ef4fc2ec-e7f2-490d-95bb-8f13a71153ef" (UID: "ef4fc2ec-e7f2-490d-95bb-8f13a71153ef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.494387 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-kube-api-access-246cw" (OuterVolumeSpecName: "kube-api-access-246cw") pod "ef4fc2ec-e7f2-490d-95bb-8f13a71153ef" (UID: "ef4fc2ec-e7f2-490d-95bb-8f13a71153ef"). InnerVolumeSpecName "kube-api-access-246cw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.529924 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ef4fc2ec-e7f2-490d-95bb-8f13a71153ef" (UID: "ef4fc2ec-e7f2-490d-95bb-8f13a71153ef"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.586106 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-246cw\" (UniqueName: \"kubernetes.io/projected/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-kube-api-access-246cw\") on node \"crc\" DevicePath \"\"" Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.586138 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.586147 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.589569 5014 generic.go:334] "Generic (PLEG): container finished" podID="ef4fc2ec-e7f2-490d-95bb-8f13a71153ef" containerID="783de64af90a589ff00286cd497076601c3de0c61b4ef4daa050687d7fd6a7b7" exitCode=0 Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.589644 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ln2v9" event={"ID":"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef","Type":"ContainerDied","Data":"783de64af90a589ff00286cd497076601c3de0c61b4ef4daa050687d7fd6a7b7"} Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.589682 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ln2v9" event={"ID":"ef4fc2ec-e7f2-490d-95bb-8f13a71153ef","Type":"ContainerDied","Data":"f4716888c81d33c1916775b691ba30a594f715549ae8eeff4310a4c6b5f996e2"} Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.589703 5014 scope.go:117] "RemoveContainer" containerID="783de64af90a589ff00286cd497076601c3de0c61b4ef4daa050687d7fd6a7b7" Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.589852 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ln2v9" Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.619049 5014 scope.go:117] "RemoveContainer" containerID="5d23eda04f5b29fcdbc5860230905a1352d0f99accbe46fa1a56dafca3f1f9dc" Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.622114 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ln2v9"] Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.624711 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ln2v9"] Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.651885 5014 scope.go:117] "RemoveContainer" containerID="6d808a1a6540e6851ebc43f9fb6613fa3358bd652d7bd5bfe51d612fde943863" Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.667206 5014 scope.go:117] "RemoveContainer" containerID="783de64af90a589ff00286cd497076601c3de0c61b4ef4daa050687d7fd6a7b7" Oct 06 21:34:38 crc kubenswrapper[5014]: E1006 21:34:38.667567 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"783de64af90a589ff00286cd497076601c3de0c61b4ef4daa050687d7fd6a7b7\": container with ID starting with 783de64af90a589ff00286cd497076601c3de0c61b4ef4daa050687d7fd6a7b7 not found: ID does not exist" containerID="783de64af90a589ff00286cd497076601c3de0c61b4ef4daa050687d7fd6a7b7" Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.667600 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"783de64af90a589ff00286cd497076601c3de0c61b4ef4daa050687d7fd6a7b7"} err="failed to get container status \"783de64af90a589ff00286cd497076601c3de0c61b4ef4daa050687d7fd6a7b7\": rpc error: code = NotFound desc = could not find container \"783de64af90a589ff00286cd497076601c3de0c61b4ef4daa050687d7fd6a7b7\": container with ID starting with 783de64af90a589ff00286cd497076601c3de0c61b4ef4daa050687d7fd6a7b7 not found: ID does not exist" Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.667986 5014 scope.go:117] "RemoveContainer" containerID="5d23eda04f5b29fcdbc5860230905a1352d0f99accbe46fa1a56dafca3f1f9dc" Oct 06 21:34:38 crc kubenswrapper[5014]: E1006 21:34:38.668672 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d23eda04f5b29fcdbc5860230905a1352d0f99accbe46fa1a56dafca3f1f9dc\": container with ID starting with 5d23eda04f5b29fcdbc5860230905a1352d0f99accbe46fa1a56dafca3f1f9dc not found: ID does not exist" containerID="5d23eda04f5b29fcdbc5860230905a1352d0f99accbe46fa1a56dafca3f1f9dc" Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.668701 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d23eda04f5b29fcdbc5860230905a1352d0f99accbe46fa1a56dafca3f1f9dc"} err="failed to get container status \"5d23eda04f5b29fcdbc5860230905a1352d0f99accbe46fa1a56dafca3f1f9dc\": rpc error: code = NotFound desc = could not find container \"5d23eda04f5b29fcdbc5860230905a1352d0f99accbe46fa1a56dafca3f1f9dc\": container with ID starting with 5d23eda04f5b29fcdbc5860230905a1352d0f99accbe46fa1a56dafca3f1f9dc not found: ID does not exist" Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.668717 5014 scope.go:117] "RemoveContainer" containerID="6d808a1a6540e6851ebc43f9fb6613fa3358bd652d7bd5bfe51d612fde943863" Oct 06 21:34:38 crc kubenswrapper[5014]: E1006 21:34:38.669044 5014 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"6d808a1a6540e6851ebc43f9fb6613fa3358bd652d7bd5bfe51d612fde943863\": container with ID starting with 6d808a1a6540e6851ebc43f9fb6613fa3358bd652d7bd5bfe51d612fde943863 not found: ID does not exist" containerID="6d808a1a6540e6851ebc43f9fb6613fa3358bd652d7bd5bfe51d612fde943863" Oct 06 21:34:38 crc kubenswrapper[5014]: I1006 21:34:38.669069 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d808a1a6540e6851ebc43f9fb6613fa3358bd652d7bd5bfe51d612fde943863"} err="failed to get container status \"6d808a1a6540e6851ebc43f9fb6613fa3358bd652d7bd5bfe51d612fde943863\": rpc error: code = NotFound desc = could not find container \"6d808a1a6540e6851ebc43f9fb6613fa3358bd652d7bd5bfe51d612fde943863\": container with ID starting with 6d808a1a6540e6851ebc43f9fb6613fa3358bd652d7bd5bfe51d612fde943863 not found: ID does not exist" Oct 06 21:34:39 crc kubenswrapper[5014]: I1006 21:34:39.493205 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef4fc2ec-e7f2-490d-95bb-8f13a71153ef" path="/var/lib/kubelet/pods/ef4fc2ec-e7f2-490d-95bb-8f13a71153ef/volumes" Oct 06 21:34:45 crc kubenswrapper[5014]: I1006 21:34:45.836453 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-v74l6"] Oct 06 21:35:10 crc kubenswrapper[5014]: I1006 21:35:10.878264 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" podUID="720c691e-a28e-4b39-9571-86e321399306" containerName="oauth-openshift" containerID="cri-o://cf6c11b6dde3468bf6e490f0312c45a66f897ab6703dae5e894d586ce5647f0d" gracePeriod=15 Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.311639 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.351993 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-574dcf5686-tw248"] Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.353530 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49f3778d-0532-4fa4-9635-500e98f8b8e9" containerName="extract-utilities" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.353648 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="49f3778d-0532-4fa4-9635-500e98f8b8e9" containerName="extract-utilities" Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.353715 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3393075-b0ae-43e5-9456-039b11d12c43" containerName="registry-server" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.353769 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3393075-b0ae-43e5-9456-039b11d12c43" containerName="registry-server" Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.353837 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5ad6e3f-cae5-465c-9744-abc1f63afd99" containerName="registry-server" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.354006 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5ad6e3f-cae5-465c-9744-abc1f63afd99" containerName="registry-server" Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.354089 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3393075-b0ae-43e5-9456-039b11d12c43" containerName="extract-content" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.354145 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3393075-b0ae-43e5-9456-039b11d12c43" containerName="extract-content" Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.354205 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef4fc2ec-e7f2-490d-95bb-8f13a71153ef" containerName="extract-content" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.354304 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef4fc2ec-e7f2-490d-95bb-8f13a71153ef" containerName="extract-content" Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.354366 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3393075-b0ae-43e5-9456-039b11d12c43" containerName="extract-utilities" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.354423 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3393075-b0ae-43e5-9456-039b11d12c43" containerName="extract-utilities" Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.354487 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5405c6a7-b9df-49ca-ba22-728e44178950" containerName="pruner" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.354544 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5405c6a7-b9df-49ca-ba22-728e44178950" containerName="pruner" Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.354604 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef4fc2ec-e7f2-490d-95bb-8f13a71153ef" containerName="registry-server" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.354688 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef4fc2ec-e7f2-490d-95bb-8f13a71153ef" containerName="registry-server" Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.354749 5014 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="ef4fc2ec-e7f2-490d-95bb-8f13a71153ef" containerName="extract-utilities" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.354802 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef4fc2ec-e7f2-490d-95bb-8f13a71153ef" containerName="extract-utilities" Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.354860 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5ad6e3f-cae5-465c-9744-abc1f63afd99" containerName="extract-content" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.354923 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5ad6e3f-cae5-465c-9744-abc1f63afd99" containerName="extract-content" Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.354984 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49f3778d-0532-4fa4-9635-500e98f8b8e9" containerName="extract-content" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.355036 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="49f3778d-0532-4fa4-9635-500e98f8b8e9" containerName="extract-content" Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.355089 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="720c691e-a28e-4b39-9571-86e321399306" containerName="oauth-openshift" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.355148 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="720c691e-a28e-4b39-9571-86e321399306" containerName="oauth-openshift" Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.355208 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5ad6e3f-cae5-465c-9744-abc1f63afd99" containerName="extract-utilities" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.355264 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5ad6e3f-cae5-465c-9744-abc1f63afd99" containerName="extract-utilities" Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.355325 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49f3778d-0532-4fa4-9635-500e98f8b8e9" containerName="registry-server" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.355385 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="49f3778d-0532-4fa4-9635-500e98f8b8e9" containerName="registry-server" Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.355440 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14817aec-b036-4806-9db0-4680b1bfadc1" containerName="pruner" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.355498 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="14817aec-b036-4806-9db0-4680b1bfadc1" containerName="pruner" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.355688 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="14817aec-b036-4806-9db0-4680b1bfadc1" containerName="pruner" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.355755 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5405c6a7-b9df-49ca-ba22-728e44178950" containerName="pruner" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.355834 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="720c691e-a28e-4b39-9571-86e321399306" containerName="oauth-openshift" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.355900 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef4fc2ec-e7f2-490d-95bb-8f13a71153ef" containerName="registry-server" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.355960 5014 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="a5ad6e3f-cae5-465c-9744-abc1f63afd99" containerName="registry-server" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.356044 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="49f3778d-0532-4fa4-9635-500e98f8b8e9" containerName="registry-server" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.357775 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3393075-b0ae-43e5-9456-039b11d12c43" containerName="registry-server" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.358394 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.371898 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-574dcf5686-tw248"] Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.510176 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-router-certs\") pod \"720c691e-a28e-4b39-9571-86e321399306\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.510259 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-trusted-ca-bundle\") pod \"720c691e-a28e-4b39-9571-86e321399306\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.510352 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-service-ca\") pod \"720c691e-a28e-4b39-9571-86e321399306\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.510395 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zw594\" (UniqueName: \"kubernetes.io/projected/720c691e-a28e-4b39-9571-86e321399306-kube-api-access-zw594\") pod \"720c691e-a28e-4b39-9571-86e321399306\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.510441 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-session\") pod \"720c691e-a28e-4b39-9571-86e321399306\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.510496 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-audit-policies\") pod \"720c691e-a28e-4b39-9571-86e321399306\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.510557 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-serving-cert\") pod \"720c691e-a28e-4b39-9571-86e321399306\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " 
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.510670 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-ocp-branding-template\") pod \"720c691e-a28e-4b39-9571-86e321399306\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.510752 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-cliconfig\") pod \"720c691e-a28e-4b39-9571-86e321399306\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.510797 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/720c691e-a28e-4b39-9571-86e321399306-audit-dir\") pod \"720c691e-a28e-4b39-9571-86e321399306\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.510842 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-error\") pod \"720c691e-a28e-4b39-9571-86e321399306\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.510904 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-login\") pod \"720c691e-a28e-4b39-9571-86e321399306\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.510956 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-idp-0-file-data\") pod \"720c691e-a28e-4b39-9571-86e321399306\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.510948 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/720c691e-a28e-4b39-9571-86e321399306-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "720c691e-a28e-4b39-9571-86e321399306" (UID: "720c691e-a28e-4b39-9571-86e321399306"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.511039 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-provider-selection\") pod \"720c691e-a28e-4b39-9571-86e321399306\" (UID: \"720c691e-a28e-4b39-9571-86e321399306\") " Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.511356 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.511416 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-service-ca\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.511487 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-serving-cert\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.511552 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-cliconfig\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.511595 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhk7w\" (UniqueName: \"kubernetes.io/projected/be27e766-39b8-4937-954d-e15d16369094-kube-api-access-rhk7w\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.511699 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-router-certs\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.511751 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/be27e766-39b8-4937-954d-e15d16369094-audit-policies\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: 
\"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.511800 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-user-template-error\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.511842 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.511848 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "720c691e-a28e-4b39-9571-86e321399306" (UID: "720c691e-a28e-4b39-9571-86e321399306"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.511893 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/be27e766-39b8-4937-954d-e15d16369094-audit-dir\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.511935 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-session\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.511988 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-user-template-login\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.512033 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.512025 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-service-ca" (OuterVolumeSpecName: 
"v4-0-config-system-service-ca") pod "720c691e-a28e-4b39-9571-86e321399306" (UID: "720c691e-a28e-4b39-9571-86e321399306"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.512078 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.512249 5014 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/720c691e-a28e-4b39-9571-86e321399306-audit-dir\") on node \"crc\" DevicePath \"\"" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.512285 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.512312 5014 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-audit-policies\") on node \"crc\" DevicePath \"\"" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.512806 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "720c691e-a28e-4b39-9571-86e321399306" (UID: "720c691e-a28e-4b39-9571-86e321399306"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.512946 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "720c691e-a28e-4b39-9571-86e321399306" (UID: "720c691e-a28e-4b39-9571-86e321399306"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.519234 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "720c691e-a28e-4b39-9571-86e321399306" (UID: "720c691e-a28e-4b39-9571-86e321399306"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.520392 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "720c691e-a28e-4b39-9571-86e321399306" (UID: "720c691e-a28e-4b39-9571-86e321399306"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.520534 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/720c691e-a28e-4b39-9571-86e321399306-kube-api-access-zw594" (OuterVolumeSpecName: "kube-api-access-zw594") pod "720c691e-a28e-4b39-9571-86e321399306" (UID: "720c691e-a28e-4b39-9571-86e321399306"). InnerVolumeSpecName "kube-api-access-zw594". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.520748 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "720c691e-a28e-4b39-9571-86e321399306" (UID: "720c691e-a28e-4b39-9571-86e321399306"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.521352 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "720c691e-a28e-4b39-9571-86e321399306" (UID: "720c691e-a28e-4b39-9571-86e321399306"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.521799 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "720c691e-a28e-4b39-9571-86e321399306" (UID: "720c691e-a28e-4b39-9571-86e321399306"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.522287 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "720c691e-a28e-4b39-9571-86e321399306" (UID: "720c691e-a28e-4b39-9571-86e321399306"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.522405 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "720c691e-a28e-4b39-9571-86e321399306" (UID: "720c691e-a28e-4b39-9571-86e321399306"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.522647 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "720c691e-a28e-4b39-9571-86e321399306" (UID: "720c691e-a28e-4b39-9571-86e321399306"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.614025 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.614108 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-service-ca\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.614150 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-serving-cert\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.614182 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-cliconfig\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.614210 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhk7w\" (UniqueName: \"kubernetes.io/projected/be27e766-39b8-4937-954d-e15d16369094-kube-api-access-rhk7w\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.614243 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-router-certs\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.614266 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/be27e766-39b8-4937-954d-e15d16369094-audit-policies\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.614295 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-user-template-error\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " 
pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.614312 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.616985 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/be27e766-39b8-4937-954d-e15d16369094-audit-dir\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.617393 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-session\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.617603 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/be27e766-39b8-4937-954d-e15d16369094-audit-dir\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.617984 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-user-template-login\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.618119 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.618378 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.618579 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-service-ca\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 
21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.619426 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.620101 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-cliconfig\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.620396 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/be27e766-39b8-4937-954d-e15d16369094-audit-policies\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.625480 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-user-template-login\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.626113 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-session\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.629909 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.632896 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.632933 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.632958 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 
21:35:11.632975 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.632989 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.633007 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zw594\" (UniqueName: \"kubernetes.io/projected/720c691e-a28e-4b39-9571-86e321399306-kube-api-access-zw594\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.633025 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.633041 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.633051 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.633066 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.633077 5014 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/720c691e-a28e-4b39-9571-86e321399306-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.634276 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248"
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.635497 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-serving-cert\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248"
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.637920 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-user-template-error\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248"
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.638014 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248"
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.638190 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhk7w\" (UniqueName: \"kubernetes.io/projected/be27e766-39b8-4937-954d-e15d16369094-kube-api-access-rhk7w\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248"
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.638233 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/be27e766-39b8-4937-954d-e15d16369094-v4-0-config-system-router-certs\") pod \"oauth-openshift-574dcf5686-tw248\" (UID: \"be27e766-39b8-4937-954d-e15d16369094\") " pod="openshift-authentication/oauth-openshift-574dcf5686-tw248"
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.686648 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-574dcf5686-tw248"
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.812394 5014 generic.go:334] "Generic (PLEG): container finished" podID="720c691e-a28e-4b39-9571-86e321399306" containerID="cf6c11b6dde3468bf6e490f0312c45a66f897ab6703dae5e894d586ce5647f0d" exitCode=0
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.812470 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" event={"ID":"720c691e-a28e-4b39-9571-86e321399306","Type":"ContainerDied","Data":"cf6c11b6dde3468bf6e490f0312c45a66f897ab6703dae5e894d586ce5647f0d"}
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.812520 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-v74l6" event={"ID":"720c691e-a28e-4b39-9571-86e321399306","Type":"ContainerDied","Data":"f4c6ed903c3395f90e7066961181cce4c93c729200b9de466531c3701c6f41b4"}
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.812553 5014 scope.go:117] "RemoveContainer" containerID="cf6c11b6dde3468bf6e490f0312c45a66f897ab6703dae5e894d586ce5647f0d"
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.813251 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-v74l6"
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.861383 5014 scope.go:117] "RemoveContainer" containerID="cf6c11b6dde3468bf6e490f0312c45a66f897ab6703dae5e894d586ce5647f0d"
Oct 06 21:35:11 crc kubenswrapper[5014]: E1006 21:35:11.864327 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf6c11b6dde3468bf6e490f0312c45a66f897ab6703dae5e894d586ce5647f0d\": container with ID starting with cf6c11b6dde3468bf6e490f0312c45a66f897ab6703dae5e894d586ce5647f0d not found: ID does not exist" containerID="cf6c11b6dde3468bf6e490f0312c45a66f897ab6703dae5e894d586ce5647f0d"
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.864370 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf6c11b6dde3468bf6e490f0312c45a66f897ab6703dae5e894d586ce5647f0d"} err="failed to get container status \"cf6c11b6dde3468bf6e490f0312c45a66f897ab6703dae5e894d586ce5647f0d\": rpc error: code = NotFound desc = could not find container \"cf6c11b6dde3468bf6e490f0312c45a66f897ab6703dae5e894d586ce5647f0d\": container with ID starting with cf6c11b6dde3468bf6e490f0312c45a66f897ab6703dae5e894d586ce5647f0d not found: ID does not exist"
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.865869 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-v74l6"]
Oct 06 21:35:11 crc kubenswrapper[5014]: I1006 21:35:11.867580 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-v74l6"]
Oct 06 21:35:12 crc kubenswrapper[5014]: I1006 21:35:12.018355 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-574dcf5686-tw248"]
Oct 06 21:35:12 crc kubenswrapper[5014]: I1006 21:35:12.824109 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" event={"ID":"be27e766-39b8-4937-954d-e15d16369094","Type":"ContainerStarted","Data":"672ba5e59f42957152e92161d7531a22939d619ece54a16308b22c1b8d6ff69c"}
Oct 06 21:35:12 crc kubenswrapper[5014]: I1006 21:35:12.824182 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" event={"ID":"be27e766-39b8-4937-954d-e15d16369094","Type":"ContainerStarted","Data":"3a7977b42920d8e9a53b2a49e796460ab3f4e4b7389d3b8f622d7a008fb10a4a"}
Oct 06 21:35:12 crc kubenswrapper[5014]: I1006 21:35:12.824693 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-574dcf5686-tw248"
Oct 06 21:35:12 crc kubenswrapper[5014]: I1006 21:35:12.831809 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-574dcf5686-tw248"
Oct 06 21:35:12 crc kubenswrapper[5014]: I1006 21:35:12.900194 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-574dcf5686-tw248" podStartSLOduration=27.900161393 podStartE2EDuration="27.900161393s" podCreationTimestamp="2025-10-06 21:34:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:35:12.8602162 +0000 UTC m=+258.153252974" watchObservedRunningTime="2025-10-06 21:35:12.900161393 +0000 UTC m=+258.193198137"
Oct 06 21:35:13 crc kubenswrapper[5014]: I1006 21:35:13.496673 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="720c691e-a28e-4b39-9571-86e321399306" path="/var/lib/kubelet/pods/720c691e-a28e-4b39-9571-86e321399306/volumes"
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.839324 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kfzpv"]
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.840472 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kfzpv" podUID="942343e5-31c5-44bf-accb-42c83a176d0c" containerName="registry-server" containerID="cri-o://d688167b0074cedb82cec6ffa60377d0ae7cd15148d43b6d4704e490ce46b0a5" gracePeriod=30
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.850875 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ftjbk"]
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.852685 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ftjbk" podUID="c68ce8d8-f494-4971-8068-4fddf55fae97" containerName="registry-server" containerID="cri-o://6bbb3f45accc86e90047da58c3f5449de81dac9aa42cc01c120fc772d62b68c3" gracePeriod=30
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.863227 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lts6v"]
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.863577 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-lts6v" podUID="04070324-674e-4785-aada-ad9ffe6e89c8" containerName="marketplace-operator" containerID="cri-o://a63b374e2b15c0781f81854b965d1c222a498df823d168b9df2eff34fbf21c26" gracePeriod=30
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.879455 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5wc2h"]
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.879858 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5wc2h" podUID="9af08180-3fb3-439d-b8cb-7b65f03c0413" containerName="registry-server" containerID="cri-o://364d3ab5e8857490dab47c7d8e9d567e7934c7e7aca7557daf152e4160248c53" gracePeriod=30
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.894131 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cgv2z"]
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.894598 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cgv2z" podUID="9557159b-e76f-4958-8d67-87c9da20b9ac" containerName="registry-server" containerID="cri-o://4e6a1f394df674a3470ba240256601cb5d31a3c880a8aafb4965dc5b621192d1" gracePeriod=30
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.897348 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fmzl2"]
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.898356 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2"
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.914999 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fmzl2"]
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.988490 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qt7r9\" (UniqueName: \"kubernetes.io/projected/2c999244-e7a0-42b2-9c2d-4e9a722617cd-kube-api-access-qt7r9\") pod \"marketplace-operator-79b997595-fmzl2\" (UID: \"2c999244-e7a0-42b2-9c2d-4e9a722617cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2"
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.988597 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2c999244-e7a0-42b2-9c2d-4e9a722617cd-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fmzl2\" (UID: \"2c999244-e7a0-42b2-9c2d-4e9a722617cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2"
Oct 06 21:35:40 crc kubenswrapper[5014]: I1006 21:35:40.988649 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2c999244-e7a0-42b2-9c2d-4e9a722617cd-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fmzl2\" (UID: \"2c999244-e7a0-42b2-9c2d-4e9a722617cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2"
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.028646 5014 generic.go:334] "Generic (PLEG): container finished" podID="9557159b-e76f-4958-8d67-87c9da20b9ac" containerID="4e6a1f394df674a3470ba240256601cb5d31a3c880a8aafb4965dc5b621192d1" exitCode=0
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.028755 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cgv2z" event={"ID":"9557159b-e76f-4958-8d67-87c9da20b9ac","Type":"ContainerDied","Data":"4e6a1f394df674a3470ba240256601cb5d31a3c880a8aafb4965dc5b621192d1"}
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.031261 5014 generic.go:334] "Generic (PLEG): container finished" podID="04070324-674e-4785-aada-ad9ffe6e89c8" containerID="a63b374e2b15c0781f81854b965d1c222a498df823d168b9df2eff34fbf21c26" exitCode=0
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.031336 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lts6v" event={"ID":"04070324-674e-4785-aada-ad9ffe6e89c8","Type":"ContainerDied","Data":"a63b374e2b15c0781f81854b965d1c222a498df823d168b9df2eff34fbf21c26"}
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.035458 5014 generic.go:334] "Generic (PLEG): container finished" podID="c68ce8d8-f494-4971-8068-4fddf55fae97" containerID="6bbb3f45accc86e90047da58c3f5449de81dac9aa42cc01c120fc772d62b68c3" exitCode=0
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.035587 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ftjbk" event={"ID":"c68ce8d8-f494-4971-8068-4fddf55fae97","Type":"ContainerDied","Data":"6bbb3f45accc86e90047da58c3f5449de81dac9aa42cc01c120fc772d62b68c3"}
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.038110 5014 generic.go:334] "Generic (PLEG): container finished" podID="942343e5-31c5-44bf-accb-42c83a176d0c" containerID="d688167b0074cedb82cec6ffa60377d0ae7cd15148d43b6d4704e490ce46b0a5" exitCode=0
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.038188 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kfzpv" event={"ID":"942343e5-31c5-44bf-accb-42c83a176d0c","Type":"ContainerDied","Data":"d688167b0074cedb82cec6ffa60377d0ae7cd15148d43b6d4704e490ce46b0a5"}
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.039817 5014 generic.go:334] "Generic (PLEG): container finished" podID="9af08180-3fb3-439d-b8cb-7b65f03c0413" containerID="364d3ab5e8857490dab47c7d8e9d567e7934c7e7aca7557daf152e4160248c53" exitCode=0
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.039842 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5wc2h" event={"ID":"9af08180-3fb3-439d-b8cb-7b65f03c0413","Type":"ContainerDied","Data":"364d3ab5e8857490dab47c7d8e9d567e7934c7e7aca7557daf152e4160248c53"}
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.092653 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qt7r9\" (UniqueName: \"kubernetes.io/projected/2c999244-e7a0-42b2-9c2d-4e9a722617cd-kube-api-access-qt7r9\") pod \"marketplace-operator-79b997595-fmzl2\" (UID: \"2c999244-e7a0-42b2-9c2d-4e9a722617cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2"
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.093237 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2c999244-e7a0-42b2-9c2d-4e9a722617cd-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fmzl2\" (UID: \"2c999244-e7a0-42b2-9c2d-4e9a722617cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2"
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.093266 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2c999244-e7a0-42b2-9c2d-4e9a722617cd-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fmzl2\" (UID: \"2c999244-e7a0-42b2-9c2d-4e9a722617cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2"
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.095783 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2c999244-e7a0-42b2-9c2d-4e9a722617cd-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fmzl2\" (UID: \"2c999244-e7a0-42b2-9c2d-4e9a722617cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2"
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.101375 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2c999244-e7a0-42b2-9c2d-4e9a722617cd-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fmzl2\" (UID: \"2c999244-e7a0-42b2-9c2d-4e9a722617cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2"
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.112773 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qt7r9\" (UniqueName: \"kubernetes.io/projected/2c999244-e7a0-42b2-9c2d-4e9a722617cd-kube-api-access-qt7r9\") pod \"marketplace-operator-79b997595-fmzl2\" (UID: \"2c999244-e7a0-42b2-9c2d-4e9a722617cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2"
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.226643 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2"
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.378134 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kfzpv"
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.387691 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ftjbk"
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.396954 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhjfp\" (UniqueName: \"kubernetes.io/projected/c68ce8d8-f494-4971-8068-4fddf55fae97-kube-api-access-nhjfp\") pod \"c68ce8d8-f494-4971-8068-4fddf55fae97\" (UID: \"c68ce8d8-f494-4971-8068-4fddf55fae97\") "
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.397025 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/942343e5-31c5-44bf-accb-42c83a176d0c-utilities\") pod \"942343e5-31c5-44bf-accb-42c83a176d0c\" (UID: \"942343e5-31c5-44bf-accb-42c83a176d0c\") "
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.397059 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c68ce8d8-f494-4971-8068-4fddf55fae97-catalog-content\") pod \"c68ce8d8-f494-4971-8068-4fddf55fae97\" (UID: \"c68ce8d8-f494-4971-8068-4fddf55fae97\") "
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.397099 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c68ce8d8-f494-4971-8068-4fddf55fae97-utilities\") pod \"c68ce8d8-f494-4971-8068-4fddf55fae97\" (UID: \"c68ce8d8-f494-4971-8068-4fddf55fae97\") "
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.397120 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lllsh\" (UniqueName: \"kubernetes.io/projected/942343e5-31c5-44bf-accb-42c83a176d0c-kube-api-access-lllsh\") pod \"942343e5-31c5-44bf-accb-42c83a176d0c\" (UID: \"942343e5-31c5-44bf-accb-42c83a176d0c\") "
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.397147 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/942343e5-31c5-44bf-accb-42c83a176d0c-catalog-content\") pod \"942343e5-31c5-44bf-accb-42c83a176d0c\" (UID: \"942343e5-31c5-44bf-accb-42c83a176d0c\") "
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.403576 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c68ce8d8-f494-4971-8068-4fddf55fae97-kube-api-access-nhjfp" (OuterVolumeSpecName: "kube-api-access-nhjfp") pod "c68ce8d8-f494-4971-8068-4fddf55fae97" (UID: "c68ce8d8-f494-4971-8068-4fddf55fae97"). InnerVolumeSpecName "kube-api-access-nhjfp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.404906 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/942343e5-31c5-44bf-accb-42c83a176d0c-utilities" (OuterVolumeSpecName: "utilities") pod "942343e5-31c5-44bf-accb-42c83a176d0c" (UID: "942343e5-31c5-44bf-accb-42c83a176d0c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.405387 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c68ce8d8-f494-4971-8068-4fddf55fae97-utilities" (OuterVolumeSpecName: "utilities") pod "c68ce8d8-f494-4971-8068-4fddf55fae97" (UID: "c68ce8d8-f494-4971-8068-4fddf55fae97"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.410349 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/942343e5-31c5-44bf-accb-42c83a176d0c-kube-api-access-lllsh" (OuterVolumeSpecName: "kube-api-access-lllsh") pod "942343e5-31c5-44bf-accb-42c83a176d0c" (UID: "942343e5-31c5-44bf-accb-42c83a176d0c"). InnerVolumeSpecName "kube-api-access-lllsh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.469977 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fmzl2"]
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.470475 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c68ce8d8-f494-4971-8068-4fddf55fae97-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c68ce8d8-f494-4971-8068-4fddf55fae97" (UID: "c68ce8d8-f494-4971-8068-4fddf55fae97"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.474153 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/942343e5-31c5-44bf-accb-42c83a176d0c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "942343e5-31c5-44bf-accb-42c83a176d0c" (UID: "942343e5-31c5-44bf-accb-42c83a176d0c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.500251 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhjfp\" (UniqueName: \"kubernetes.io/projected/c68ce8d8-f494-4971-8068-4fddf55fae97-kube-api-access-nhjfp\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.500285 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/942343e5-31c5-44bf-accb-42c83a176d0c-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.500296 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c68ce8d8-f494-4971-8068-4fddf55fae97-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.500307 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c68ce8d8-f494-4971-8068-4fddf55fae97-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.500316 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lllsh\" (UniqueName: \"kubernetes.io/projected/942343e5-31c5-44bf-accb-42c83a176d0c-kube-api-access-lllsh\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.500326 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/942343e5-31c5-44bf-accb-42c83a176d0c-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.735261 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lts6v"
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.768960 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cgv2z"
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.802226 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5wc2h"
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.906880 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/04070324-674e-4785-aada-ad9ffe6e89c8-marketplace-trusted-ca\") pod \"04070324-674e-4785-aada-ad9ffe6e89c8\" (UID: \"04070324-674e-4785-aada-ad9ffe6e89c8\") "
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.906927 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9af08180-3fb3-439d-b8cb-7b65f03c0413-utilities\") pod \"9af08180-3fb3-439d-b8cb-7b65f03c0413\" (UID: \"9af08180-3fb3-439d-b8cb-7b65f03c0413\") "
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.906955 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wzpg\" (UniqueName: \"kubernetes.io/projected/9557159b-e76f-4958-8d67-87c9da20b9ac-kube-api-access-5wzpg\") pod \"9557159b-e76f-4958-8d67-87c9da20b9ac\" (UID: \"9557159b-e76f-4958-8d67-87c9da20b9ac\") "
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.906975 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9557159b-e76f-4958-8d67-87c9da20b9ac-catalog-content\") pod \"9557159b-e76f-4958-8d67-87c9da20b9ac\" (UID: \"9557159b-e76f-4958-8d67-87c9da20b9ac\") "
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.907009 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9af08180-3fb3-439d-b8cb-7b65f03c0413-catalog-content\") pod \"9af08180-3fb3-439d-b8cb-7b65f03c0413\" (UID: \"9af08180-3fb3-439d-b8cb-7b65f03c0413\") "
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.907031 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9557159b-e76f-4958-8d67-87c9da20b9ac-utilities\") pod \"9557159b-e76f-4958-8d67-87c9da20b9ac\" (UID: \"9557159b-e76f-4958-8d67-87c9da20b9ac\") "
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.907051 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6n2g\" (UniqueName: \"kubernetes.io/projected/9af08180-3fb3-439d-b8cb-7b65f03c0413-kube-api-access-l6n2g\") pod \"9af08180-3fb3-439d-b8cb-7b65f03c0413\" (UID: \"9af08180-3fb3-439d-b8cb-7b65f03c0413\") "
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.907086 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7jjh\" (UniqueName: \"kubernetes.io/projected/04070324-674e-4785-aada-ad9ffe6e89c8-kube-api-access-x7jjh\") pod \"04070324-674e-4785-aada-ad9ffe6e89c8\" (UID: \"04070324-674e-4785-aada-ad9ffe6e89c8\") "
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.907110 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/04070324-674e-4785-aada-ad9ffe6e89c8-marketplace-operator-metrics\") pod \"04070324-674e-4785-aada-ad9ffe6e89c8\" (UID: \"04070324-674e-4785-aada-ad9ffe6e89c8\") "
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.908711 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9af08180-3fb3-439d-b8cb-7b65f03c0413-utilities" (OuterVolumeSpecName: "utilities") pod "9af08180-3fb3-439d-b8cb-7b65f03c0413" (UID: "9af08180-3fb3-439d-b8cb-7b65f03c0413"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.908779 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9557159b-e76f-4958-8d67-87c9da20b9ac-utilities" (OuterVolumeSpecName: "utilities") pod "9557159b-e76f-4958-8d67-87c9da20b9ac" (UID: "9557159b-e76f-4958-8d67-87c9da20b9ac"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.909141 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04070324-674e-4785-aada-ad9ffe6e89c8-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "04070324-674e-4785-aada-ad9ffe6e89c8" (UID: "04070324-674e-4785-aada-ad9ffe6e89c8"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.911469 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9557159b-e76f-4958-8d67-87c9da20b9ac-kube-api-access-5wzpg" (OuterVolumeSpecName: "kube-api-access-5wzpg") pod "9557159b-e76f-4958-8d67-87c9da20b9ac" (UID: "9557159b-e76f-4958-8d67-87c9da20b9ac"). InnerVolumeSpecName "kube-api-access-5wzpg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.916641 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9af08180-3fb3-439d-b8cb-7b65f03c0413-kube-api-access-l6n2g" (OuterVolumeSpecName: "kube-api-access-l6n2g") pod "9af08180-3fb3-439d-b8cb-7b65f03c0413" (UID: "9af08180-3fb3-439d-b8cb-7b65f03c0413"). InnerVolumeSpecName "kube-api-access-l6n2g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.916846 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04070324-674e-4785-aada-ad9ffe6e89c8-kube-api-access-x7jjh" (OuterVolumeSpecName: "kube-api-access-x7jjh") pod "04070324-674e-4785-aada-ad9ffe6e89c8" (UID: "04070324-674e-4785-aada-ad9ffe6e89c8"). InnerVolumeSpecName "kube-api-access-x7jjh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.917065 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04070324-674e-4785-aada-ad9ffe6e89c8-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "04070324-674e-4785-aada-ad9ffe6e89c8" (UID: "04070324-674e-4785-aada-ad9ffe6e89c8"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:35:41 crc kubenswrapper[5014]: I1006 21:35:41.929287 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9af08180-3fb3-439d-b8cb-7b65f03c0413-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9af08180-3fb3-439d-b8cb-7b65f03c0413" (UID: "9af08180-3fb3-439d-b8cb-7b65f03c0413"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.001050 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9557159b-e76f-4958-8d67-87c9da20b9ac-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9557159b-e76f-4958-8d67-87c9da20b9ac" (UID: "9557159b-e76f-4958-8d67-87c9da20b9ac"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.008250 5014 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/04070324-674e-4785-aada-ad9ffe6e89c8-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.008292 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9af08180-3fb3-439d-b8cb-7b65f03c0413-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.008307 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wzpg\" (UniqueName: \"kubernetes.io/projected/9557159b-e76f-4958-8d67-87c9da20b9ac-kube-api-access-5wzpg\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.008319 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9557159b-e76f-4958-8d67-87c9da20b9ac-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.008329 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9af08180-3fb3-439d-b8cb-7b65f03c0413-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.008340 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9557159b-e76f-4958-8d67-87c9da20b9ac-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.008353 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6n2g\" (UniqueName: \"kubernetes.io/projected/9af08180-3fb3-439d-b8cb-7b65f03c0413-kube-api-access-l6n2g\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.008367 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7jjh\" (UniqueName: \"kubernetes.io/projected/04070324-674e-4785-aada-ad9ffe6e89c8-kube-api-access-x7jjh\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.008377 5014 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/04070324-674e-4785-aada-ad9ffe6e89c8-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.047261 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cgv2z" event={"ID":"9557159b-e76f-4958-8d67-87c9da20b9ac","Type":"ContainerDied","Data":"043fca04a620c08076105b12f5b1694cd14d45e2c83e101501e6df474b5ce358"}
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.047332 5014 scope.go:117] "RemoveContainer" containerID="4e6a1f394df674a3470ba240256601cb5d31a3c880a8aafb4965dc5b621192d1"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.047391 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cgv2z"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.049327 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2" event={"ID":"2c999244-e7a0-42b2-9c2d-4e9a722617cd","Type":"ContainerStarted","Data":"1769120173b99896d2f4358e0cb98190e4f11732c86d0b31dac9fb1b0524d439"}
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.049399 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2" event={"ID":"2c999244-e7a0-42b2-9c2d-4e9a722617cd","Type":"ContainerStarted","Data":"ef045b9bc7b725a02e02e1da91b169006847f89b73e7df3d5df85abd986a93fa"}
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.049568 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.066926 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.070584 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ftjbk" event={"ID":"c68ce8d8-f494-4971-8068-4fddf55fae97","Type":"ContainerDied","Data":"c5bb11604f3bf73e68c4648d746bc6b791fbfdd14066e918555bb9b2cd87f543"}
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.071399 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ftjbk"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.078261 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kfzpv" event={"ID":"942343e5-31c5-44bf-accb-42c83a176d0c","Type":"ContainerDied","Data":"a3035f52e2c4799f1c09f9429364c3678e73d77433bde23c5e3b061e4118c9e2"}
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.078293 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kfzpv"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.082046 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5wc2h" event={"ID":"9af08180-3fb3-439d-b8cb-7b65f03c0413","Type":"ContainerDied","Data":"a97d323a3cec8637212efbeb72fce8bbcdc30fe7250ea610b3480ab9ce94f948"}
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.082181 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5wc2h"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.083833 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-lts6v" event={"ID":"04070324-674e-4785-aada-ad9ffe6e89c8","Type":"ContainerDied","Data":"e2bc7670257cca0141a182589eafca524f48097d35760fb351f9937204e8533f"}
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.083945 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-lts6v"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.092304 5014 scope.go:117] "RemoveContainer" containerID="8bbb43e1b970a26b3c19e2fd6769b8cfbb194193b5f9f7475e6eff5ae5d3f1bd"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.112318 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-fmzl2" podStartSLOduration=2.112288174 podStartE2EDuration="2.112288174s" podCreationTimestamp="2025-10-06 21:35:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:35:42.104369943 +0000 UTC m=+287.397406717" watchObservedRunningTime="2025-10-06 21:35:42.112288174 +0000 UTC m=+287.405324928"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.130353 5014 scope.go:117] "RemoveContainer" containerID="26ea44509b670ee6f169ff43f00ff10a37fe0a55018293a183a844289d0c83ec"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.134164 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cgv2z"]
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.140981 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cgv2z"]
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.146644 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lts6v"]
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.165308 5014 scope.go:117] "RemoveContainer" containerID="6bbb3f45accc86e90047da58c3f5449de81dac9aa42cc01c120fc772d62b68c3"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.174208 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-lts6v"]
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.178666 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kfzpv"]
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.179385 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kfzpv"]
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.183186 5014 scope.go:117] "RemoveContainer" containerID="57cc09db6f57f1fbe7a95787820e28c7ec5b12c623cc1f086b313e2f9bea10dd"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.183353 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ftjbk"]
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.186521 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ftjbk"]
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.192758 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5wc2h"]
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.196957 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5wc2h"]
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.202174 5014 scope.go:117] "RemoveContainer" containerID="1840f363ead25ce3fd9b5d47c5abd33013c6cbc5a26ec78abbcba64012de3e38"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.219075 5014 scope.go:117] "RemoveContainer" containerID="d688167b0074cedb82cec6ffa60377d0ae7cd15148d43b6d4704e490ce46b0a5"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.236004 5014 scope.go:117] "RemoveContainer" containerID="f698fa53ea42102072b37c8455fa773f11c83f0dd0835fbe1f7fec5bee919b0b"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.250566 5014 scope.go:117] "RemoveContainer" containerID="c7162139c0e120128611d52236f4c88a60631d9453ac9b088d7c3f3f3a885de7"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.263248 5014 scope.go:117] "RemoveContainer" containerID="364d3ab5e8857490dab47c7d8e9d567e7934c7e7aca7557daf152e4160248c53"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.289293 5014 scope.go:117] "RemoveContainer" containerID="473f7a9bbd9e40f5c008991fc3fd47765efe5620fe7a7fa7e2585a53f09e5c82"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.309002 5014 scope.go:117] "RemoveContainer" containerID="830819e70c69de62dc8a5490c09f47ffac6eae89343f26e51219874da8dc3a55"
Oct 06 21:35:42 crc kubenswrapper[5014]: I1006 21:35:42.326205 5014 scope.go:117] "RemoveContainer" containerID="a63b374e2b15c0781f81854b965d1c222a498df823d168b9df2eff34fbf21c26"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.069149 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-krdbw"]
Oct 06 21:35:43 crc kubenswrapper[5014]: E1006 21:35:43.069506 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c68ce8d8-f494-4971-8068-4fddf55fae97" containerName="extract-content"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.069524 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c68ce8d8-f494-4971-8068-4fddf55fae97" containerName="extract-content"
Oct 06 21:35:43 crc kubenswrapper[5014]: E1006 21:35:43.069544 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="942343e5-31c5-44bf-accb-42c83a176d0c" containerName="extract-utilities"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.069638 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="942343e5-31c5-44bf-accb-42c83a176d0c" containerName="extract-utilities"
Oct 06 21:35:43 crc kubenswrapper[5014]: E1006 21:35:43.069652 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9557159b-e76f-4958-8d67-87c9da20b9ac" containerName="extract-content"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.069661 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9557159b-e76f-4958-8d67-87c9da20b9ac" containerName="extract-content"
Oct 06 21:35:43 crc kubenswrapper[5014]: E1006 21:35:43.069673 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c68ce8d8-f494-4971-8068-4fddf55fae97" containerName="registry-server"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.069683 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c68ce8d8-f494-4971-8068-4fddf55fae97" containerName="registry-server"
Oct 06 21:35:43 crc kubenswrapper[5014]: E1006 21:35:43.069696 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9af08180-3fb3-439d-b8cb-7b65f03c0413" containerName="registry-server"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.069703 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9af08180-3fb3-439d-b8cb-7b65f03c0413" containerName="registry-server"
Oct 06 21:35:43 crc kubenswrapper[5014]: E1006 21:35:43.069713 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9af08180-3fb3-439d-b8cb-7b65f03c0413" containerName="extract-utilities"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.069722 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9af08180-3fb3-439d-b8cb-7b65f03c0413" containerName="extract-utilities"
Oct 06 21:35:43 crc kubenswrapper[5014]: E1006 21:35:43.069749 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9557159b-e76f-4958-8d67-87c9da20b9ac" containerName="registry-server"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.069756 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9557159b-e76f-4958-8d67-87c9da20b9ac" containerName="registry-server"
Oct 06 21:35:43 crc kubenswrapper[5014]: E1006 21:35:43.069772 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9557159b-e76f-4958-8d67-87c9da20b9ac" containerName="extract-utilities"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.069780 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9557159b-e76f-4958-8d67-87c9da20b9ac" containerName="extract-utilities"
Oct 06 21:35:43 crc kubenswrapper[5014]: E1006 21:35:43.069793 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c68ce8d8-f494-4971-8068-4fddf55fae97" containerName="extract-utilities"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.069801 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c68ce8d8-f494-4971-8068-4fddf55fae97" containerName="extract-utilities"
Oct 06 21:35:43 crc kubenswrapper[5014]: E1006 21:35:43.069811 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04070324-674e-4785-aada-ad9ffe6e89c8" containerName="marketplace-operator"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.069819 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="04070324-674e-4785-aada-ad9ffe6e89c8" containerName="marketplace-operator"
Oct 06 21:35:43 crc kubenswrapper[5014]: E1006 21:35:43.069830 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="942343e5-31c5-44bf-accb-42c83a176d0c" containerName="registry-server"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.069838 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="942343e5-31c5-44bf-accb-42c83a176d0c" containerName="registry-server"
Oct 06 21:35:43 crc kubenswrapper[5014]: E1006 21:35:43.069848 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9af08180-3fb3-439d-b8cb-7b65f03c0413" containerName="extract-content"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.069857 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9af08180-3fb3-439d-b8cb-7b65f03c0413" containerName="extract-content"
Oct 06 21:35:43 crc kubenswrapper[5014]: E1006 21:35:43.069868 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="942343e5-31c5-44bf-accb-42c83a176d0c" containerName="extract-content"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.069876 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="942343e5-31c5-44bf-accb-42c83a176d0c" containerName="extract-content"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.069991 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c68ce8d8-f494-4971-8068-4fddf55fae97" containerName="registry-server"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.070006 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="9557159b-e76f-4958-8d67-87c9da20b9ac" containerName="registry-server"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.070016 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="942343e5-31c5-44bf-accb-42c83a176d0c" containerName="registry-server"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.070035 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="04070324-674e-4785-aada-ad9ffe6e89c8" containerName="marketplace-operator"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.070045 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="9af08180-3fb3-439d-b8cb-7b65f03c0413" containerName="registry-server"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.070941 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-krdbw"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.073015 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.078936 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-krdbw"]
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.225475 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c883ea5-1a89-459e-ad85-0ba6deaf0b7e-catalog-content\") pod \"redhat-marketplace-krdbw\" (UID: \"0c883ea5-1a89-459e-ad85-0ba6deaf0b7e\") " pod="openshift-marketplace/redhat-marketplace-krdbw"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.225578 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c883ea5-1a89-459e-ad85-0ba6deaf0b7e-utilities\") pod \"redhat-marketplace-krdbw\" (UID: \"0c883ea5-1a89-459e-ad85-0ba6deaf0b7e\") " pod="openshift-marketplace/redhat-marketplace-krdbw"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.225610 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkgth\" (UniqueName: \"kubernetes.io/projected/0c883ea5-1a89-459e-ad85-0ba6deaf0b7e-kube-api-access-fkgth\") pod \"redhat-marketplace-krdbw\" (UID: \"0c883ea5-1a89-459e-ad85-0ba6deaf0b7e\") " pod="openshift-marketplace/redhat-marketplace-krdbw"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.271225 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-b2m7z"]
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.272448 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b2m7z"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.276600 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.299537 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b2m7z"]
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.327193 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c883ea5-1a89-459e-ad85-0ba6deaf0b7e-utilities\") pod \"redhat-marketplace-krdbw\" (UID: \"0c883ea5-1a89-459e-ad85-0ba6deaf0b7e\") " pod="openshift-marketplace/redhat-marketplace-krdbw"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.327255 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkgth\" (UniqueName: \"kubernetes.io/projected/0c883ea5-1a89-459e-ad85-0ba6deaf0b7e-kube-api-access-fkgth\") pod \"redhat-marketplace-krdbw\" (UID: \"0c883ea5-1a89-459e-ad85-0ba6deaf0b7e\") " pod="openshift-marketplace/redhat-marketplace-krdbw"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.327536 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c883ea5-1a89-459e-ad85-0ba6deaf0b7e-catalog-content\") pod \"redhat-marketplace-krdbw\" (UID: \"0c883ea5-1a89-459e-ad85-0ba6deaf0b7e\") " pod="openshift-marketplace/redhat-marketplace-krdbw"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.328090 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c883ea5-1a89-459e-ad85-0ba6deaf0b7e-catalog-content\") pod \"redhat-marketplace-krdbw\" (UID: \"0c883ea5-1a89-459e-ad85-0ba6deaf0b7e\") " pod="openshift-marketplace/redhat-marketplace-krdbw"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.328252 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c883ea5-1a89-459e-ad85-0ba6deaf0b7e-utilities\") pod \"redhat-marketplace-krdbw\" (UID: \"0c883ea5-1a89-459e-ad85-0ba6deaf0b7e\") " pod="openshift-marketplace/redhat-marketplace-krdbw"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.349074 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkgth\" (UniqueName: \"kubernetes.io/projected/0c883ea5-1a89-459e-ad85-0ba6deaf0b7e-kube-api-access-fkgth\") pod \"redhat-marketplace-krdbw\" (UID: \"0c883ea5-1a89-459e-ad85-0ba6deaf0b7e\") " pod="openshift-marketplace/redhat-marketplace-krdbw"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.428519 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-krdbw"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.429112 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdws7\" (UniqueName: \"kubernetes.io/projected/85f140b3-dbc6-495e-b10a-fff600c38b58-kube-api-access-xdws7\") pod \"redhat-operators-b2m7z\" (UID: \"85f140b3-dbc6-495e-b10a-fff600c38b58\") " pod="openshift-marketplace/redhat-operators-b2m7z"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.429298 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85f140b3-dbc6-495e-b10a-fff600c38b58-utilities\") pod \"redhat-operators-b2m7z\" (UID: \"85f140b3-dbc6-495e-b10a-fff600c38b58\") " pod="openshift-marketplace/redhat-operators-b2m7z"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.429335 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85f140b3-dbc6-495e-b10a-fff600c38b58-catalog-content\") pod \"redhat-operators-b2m7z\" (UID: \"85f140b3-dbc6-495e-b10a-fff600c38b58\") " pod="openshift-marketplace/redhat-operators-b2m7z"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.495899 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04070324-674e-4785-aada-ad9ffe6e89c8" path="/var/lib/kubelet/pods/04070324-674e-4785-aada-ad9ffe6e89c8/volumes"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.496705 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="942343e5-31c5-44bf-accb-42c83a176d0c" path="/var/lib/kubelet/pods/942343e5-31c5-44bf-accb-42c83a176d0c/volumes"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.497505 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9557159b-e76f-4958-8d67-87c9da20b9ac" path="/var/lib/kubelet/pods/9557159b-e76f-4958-8d67-87c9da20b9ac/volumes"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.498839 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9af08180-3fb3-439d-b8cb-7b65f03c0413" path="/var/lib/kubelet/pods/9af08180-3fb3-439d-b8cb-7b65f03c0413/volumes"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.499568 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c68ce8d8-f494-4971-8068-4fddf55fae97" path="/var/lib/kubelet/pods/c68ce8d8-f494-4971-8068-4fddf55fae97/volumes"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.530355 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85f140b3-dbc6-495e-b10a-fff600c38b58-utilities\") pod \"redhat-operators-b2m7z\" (UID: \"85f140b3-dbc6-495e-b10a-fff600c38b58\") " pod="openshift-marketplace/redhat-operators-b2m7z"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.531023 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85f140b3-dbc6-495e-b10a-fff600c38b58-catalog-content\") pod \"redhat-operators-b2m7z\" (UID: \"85f140b3-dbc6-495e-b10a-fff600c38b58\") " pod="openshift-marketplace/redhat-operators-b2m7z"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.531132 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdws7\" (UniqueName: \"kubernetes.io/projected/85f140b3-dbc6-495e-b10a-fff600c38b58-kube-api-access-xdws7\") pod \"redhat-operators-b2m7z\" (UID: \"85f140b3-dbc6-495e-b10a-fff600c38b58\") " pod="openshift-marketplace/redhat-operators-b2m7z"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.531582 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85f140b3-dbc6-495e-b10a-fff600c38b58-catalog-content\") pod \"redhat-operators-b2m7z\" (UID: \"85f140b3-dbc6-495e-b10a-fff600c38b58\") " pod="openshift-marketplace/redhat-operators-b2m7z"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.532093 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85f140b3-dbc6-495e-b10a-fff600c38b58-utilities\") pod \"redhat-operators-b2m7z\" (UID: \"85f140b3-dbc6-495e-b10a-fff600c38b58\") " pod="openshift-marketplace/redhat-operators-b2m7z"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.555973 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdws7\" (UniqueName: \"kubernetes.io/projected/85f140b3-dbc6-495e-b10a-fff600c38b58-kube-api-access-xdws7\") pod \"redhat-operators-b2m7z\" (UID: \"85f140b3-dbc6-495e-b10a-fff600c38b58\") " pod="openshift-marketplace/redhat-operators-b2m7z"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.595519 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b2m7z"
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.788705 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b2m7z"]
Oct 06 21:35:43 crc kubenswrapper[5014]: W1006 21:35:43.797589 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod85f140b3_dbc6_495e_b10a_fff600c38b58.slice/crio-6614b15af3564e9eb143ad69cd3794c4a43196ce30e33a15e46de7204482d14c WatchSource:0}: Error finding container 6614b15af3564e9eb143ad69cd3794c4a43196ce30e33a15e46de7204482d14c: Status 404 returned error can't find the container with id 6614b15af3564e9eb143ad69cd3794c4a43196ce30e33a15e46de7204482d14c
Oct 06 21:35:43 crc kubenswrapper[5014]: I1006 21:35:43.843153 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-krdbw"]
Oct 06 21:35:43 crc kubenswrapper[5014]: W1006 21:35:43.861846 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c883ea5_1a89_459e_ad85_0ba6deaf0b7e.slice/crio-709e8599f3b4259d804dc026923ad2c08bcbde599ff36ea3ddf607ba25f8409e WatchSource:0}: Error finding container 709e8599f3b4259d804dc026923ad2c08bcbde599ff36ea3ddf607ba25f8409e: Status 404 returned error can't find the container with id 709e8599f3b4259d804dc026923ad2c08bcbde599ff36ea3ddf607ba25f8409e
Oct 06 21:35:44 crc kubenswrapper[5014]: I1006 21:35:44.108610 5014 generic.go:334] "Generic (PLEG): container finished" podID="0c883ea5-1a89-459e-ad85-0ba6deaf0b7e" containerID="3e79c44f2b26a4c2a53eea64e3a88fce6761f5198e20c05ba1078193f9f25bd8" exitCode=0
Oct 06 21:35:44 crc kubenswrapper[5014]: I1006 21:35:44.108938 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-krdbw" event={"ID":"0c883ea5-1a89-459e-ad85-0ba6deaf0b7e","Type":"ContainerDied","Data":"3e79c44f2b26a4c2a53eea64e3a88fce6761f5198e20c05ba1078193f9f25bd8"}
Oct 06 21:35:44 crc kubenswrapper[5014]: I1006 21:35:44.109205 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-krdbw" event={"ID":"0c883ea5-1a89-459e-ad85-0ba6deaf0b7e","Type":"ContainerStarted","Data":"709e8599f3b4259d804dc026923ad2c08bcbde599ff36ea3ddf607ba25f8409e"}
Oct 06 21:35:44 crc kubenswrapper[5014]: I1006 21:35:44.113082 5014 generic.go:334] "Generic (PLEG): container finished" podID="85f140b3-dbc6-495e-b10a-fff600c38b58" containerID="40bc45169525e86aa893ceed91c9383e0062190899251c72557ba2140c30e8aa" exitCode=0
Oct 06 21:35:44 crc kubenswrapper[5014]: I1006 21:35:44.114149 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b2m7z" event={"ID":"85f140b3-dbc6-495e-b10a-fff600c38b58","Type":"ContainerDied","Data":"40bc45169525e86aa893ceed91c9383e0062190899251c72557ba2140c30e8aa"}
Oct 06 21:35:44 crc kubenswrapper[5014]: I1006 21:35:44.114182 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b2m7z" event={"ID":"85f140b3-dbc6-495e-b10a-fff600c38b58","Type":"ContainerStarted","Data":"6614b15af3564e9eb143ad69cd3794c4a43196ce30e33a15e46de7204482d14c"}
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.123267 5014 generic.go:334] "Generic (PLEG): container finished" podID="0c883ea5-1a89-459e-ad85-0ba6deaf0b7e" containerID="a10022b45427a98925a7da1920dca2362dc3878bcae0158c6294ec008524853d" exitCode=0
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.123337 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-krdbw" event={"ID":"0c883ea5-1a89-459e-ad85-0ba6deaf0b7e","Type":"ContainerDied","Data":"a10022b45427a98925a7da1920dca2362dc3878bcae0158c6294ec008524853d"}
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.126794 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b2m7z" event={"ID":"85f140b3-dbc6-495e-b10a-fff600c38b58","Type":"ContainerStarted","Data":"407f6afe54721e6ff6f5cf02cfe3ecfea6380607e9f7a63904b3d718a60105d2"}
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.481243 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6h862"]
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.482484 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6h862"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.485350 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.496119 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6h862"]
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.663924 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6m7j\" (UniqueName: \"kubernetes.io/projected/e2d1f2da-6708-4d69-ac51-4789097a2ae3-kube-api-access-x6m7j\") pod \"certified-operators-6h862\" (UID: \"e2d1f2da-6708-4d69-ac51-4789097a2ae3\") " pod="openshift-marketplace/certified-operators-6h862"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.664016 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2d1f2da-6708-4d69-ac51-4789097a2ae3-catalog-content\") pod \"certified-operators-6h862\" (UID: \"e2d1f2da-6708-4d69-ac51-4789097a2ae3\") " pod="openshift-marketplace/certified-operators-6h862"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.664070 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2d1f2da-6708-4d69-ac51-4789097a2ae3-utilities\") pod \"certified-operators-6h862\" (UID: \"e2d1f2da-6708-4d69-ac51-4789097a2ae3\") " pod="openshift-marketplace/certified-operators-6h862"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.669430 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4kg57"]
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.672962 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4kg57"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.675741 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.679144 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4kg57"]
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.764812 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2d1f2da-6708-4d69-ac51-4789097a2ae3-catalog-content\") pod \"certified-operators-6h862\" (UID: \"e2d1f2da-6708-4d69-ac51-4789097a2ae3\") " pod="openshift-marketplace/certified-operators-6h862"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.765356 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41881b03-6274-4b6b-a652-32bfd7ea5be3-utilities\") pod \"community-operators-4kg57\" (UID: \"41881b03-6274-4b6b-a652-32bfd7ea5be3\") " pod="openshift-marketplace/community-operators-4kg57"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.765392 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41881b03-6274-4b6b-a652-32bfd7ea5be3-catalog-content\") pod \"community-operators-4kg57\" (UID: \"41881b03-6274-4b6b-a652-32bfd7ea5be3\") " pod="openshift-marketplace/community-operators-4kg57"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.765427 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2d1f2da-6708-4d69-ac51-4789097a2ae3-utilities\") pod \"certified-operators-6h862\" (UID: \"e2d1f2da-6708-4d69-ac51-4789097a2ae3\") " pod="openshift-marketplace/certified-operators-6h862"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.765726 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8s8vs\" (UniqueName: \"kubernetes.io/projected/41881b03-6274-4b6b-a652-32bfd7ea5be3-kube-api-access-8s8vs\") pod \"community-operators-4kg57\" (UID: \"41881b03-6274-4b6b-a652-32bfd7ea5be3\") " pod="openshift-marketplace/community-operators-4kg57"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.765968 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6m7j\" (UniqueName: \"kubernetes.io/projected/e2d1f2da-6708-4d69-ac51-4789097a2ae3-kube-api-access-x6m7j\") pod \"certified-operators-6h862\" (UID: \"e2d1f2da-6708-4d69-ac51-4789097a2ae3\") " pod="openshift-marketplace/certified-operators-6h862"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.766072 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2d1f2da-6708-4d69-ac51-4789097a2ae3-utilities\") pod \"certified-operators-6h862\" (UID: \"e2d1f2da-6708-4d69-ac51-4789097a2ae3\") " pod="openshift-marketplace/certified-operators-6h862"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.766156 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2d1f2da-6708-4d69-ac51-4789097a2ae3-catalog-content\") pod \"certified-operators-6h862\" (UID: \"e2d1f2da-6708-4d69-ac51-4789097a2ae3\") " pod="openshift-marketplace/certified-operators-6h862"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.790422 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6m7j\" (UniqueName: \"kubernetes.io/projected/e2d1f2da-6708-4d69-ac51-4789097a2ae3-kube-api-access-x6m7j\") pod \"certified-operators-6h862\" (UID: \"e2d1f2da-6708-4d69-ac51-4789097a2ae3\") " pod="openshift-marketplace/certified-operators-6h862"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.798793 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6h862"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.867226 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41881b03-6274-4b6b-a652-32bfd7ea5be3-utilities\") pod \"community-operators-4kg57\" (UID: \"41881b03-6274-4b6b-a652-32bfd7ea5be3\") " pod="openshift-marketplace/community-operators-4kg57"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.867275 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41881b03-6274-4b6b-a652-32bfd7ea5be3-catalog-content\") pod \"community-operators-4kg57\" (UID: \"41881b03-6274-4b6b-a652-32bfd7ea5be3\") " pod="openshift-marketplace/community-operators-4kg57"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.867332 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8s8vs\" (UniqueName: \"kubernetes.io/projected/41881b03-6274-4b6b-a652-32bfd7ea5be3-kube-api-access-8s8vs\") pod \"community-operators-4kg57\" (UID: \"41881b03-6274-4b6b-a652-32bfd7ea5be3\") " pod="openshift-marketplace/community-operators-4kg57"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.868181 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41881b03-6274-4b6b-a652-32bfd7ea5be3-utilities\") pod \"community-operators-4kg57\" (UID: \"41881b03-6274-4b6b-a652-32bfd7ea5be3\") " pod="openshift-marketplace/community-operators-4kg57"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.868444 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41881b03-6274-4b6b-a652-32bfd7ea5be3-catalog-content\") pod \"community-operators-4kg57\" (UID: \"41881b03-6274-4b6b-a652-32bfd7ea5be3\") " pod="openshift-marketplace/community-operators-4kg57"
Oct 06 21:35:45 crc kubenswrapper[5014]: I1006 21:35:45.888828 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8s8vs\" (UniqueName: \"kubernetes.io/projected/41881b03-6274-4b6b-a652-32bfd7ea5be3-kube-api-access-8s8vs\") pod \"community-operators-4kg57\" (UID: \"41881b03-6274-4b6b-a652-32bfd7ea5be3\") " pod="openshift-marketplace/community-operators-4kg57"
Oct 06 21:35:46 crc kubenswrapper[5014]: I1006 21:35:46.002760 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4kg57"
Oct 06 21:35:46 crc kubenswrapper[5014]: I1006 21:35:46.133737 5014 generic.go:334] "Generic (PLEG): container finished" podID="85f140b3-dbc6-495e-b10a-fff600c38b58" containerID="407f6afe54721e6ff6f5cf02cfe3ecfea6380607e9f7a63904b3d718a60105d2" exitCode=0
Oct 06 21:35:46 crc kubenswrapper[5014]: I1006 21:35:46.133842 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b2m7z" event={"ID":"85f140b3-dbc6-495e-b10a-fff600c38b58","Type":"ContainerDied","Data":"407f6afe54721e6ff6f5cf02cfe3ecfea6380607e9f7a63904b3d718a60105d2"}
Oct 06 21:35:46 crc kubenswrapper[5014]: I1006 21:35:46.138580 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-krdbw" event={"ID":"0c883ea5-1a89-459e-ad85-0ba6deaf0b7e","Type":"ContainerStarted","Data":"2e6c2202902f01f00745045a1da741bfa934d98fb83ff849fa66ccdc5c90e141"}
Oct 06 21:35:46 crc kubenswrapper[5014]: I1006 21:35:46.180614 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-krdbw" podStartSLOduration=1.692572264 podStartE2EDuration="3.180589528s" podCreationTimestamp="2025-10-06 21:35:43 +0000 UTC" firstStartedPulling="2025-10-06 21:35:44.110575736 +0000 UTC m=+289.403612520" lastFinishedPulling="2025-10-06 21:35:45.59859305 +0000 UTC m=+290.891629784" observedRunningTime="2025-10-06 21:35:46.176414085 +0000 UTC m=+291.469450819" watchObservedRunningTime="2025-10-06 21:35:46.180589528 +0000 UTC m=+291.473626262"
Oct 06 21:35:46 crc kubenswrapper[5014]: I1006 21:35:46.193598 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4kg57"]
Oct 06 21:35:46 crc kubenswrapper[5014]: W1006 21:35:46.202747 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod41881b03_6274_4b6b_a652_32bfd7ea5be3.slice/crio-90df76fc9a4b4617cdb9a1b84150a36b209a51ad968b3d86d6cbe5521c3d3533 WatchSource:0}: Error finding container 90df76fc9a4b4617cdb9a1b84150a36b209a51ad968b3d86d6cbe5521c3d3533: Status 404 returned error can't find the container with id 90df76fc9a4b4617cdb9a1b84150a36b209a51ad968b3d86d6cbe5521c3d3533
Oct 06 21:35:46 crc kubenswrapper[5014]: W1006 21:35:46.205651 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode2d1f2da_6708_4d69_ac51_4789097a2ae3.slice/crio-d1f68bd8b8c5749216a779d6f14ae49d1e68ad5e9e505bc0387d4c528a6f0038 WatchSource:0}: Error finding container d1f68bd8b8c5749216a779d6f14ae49d1e68ad5e9e505bc0387d4c528a6f0038: Status 404 returned error can't find the container with id d1f68bd8b8c5749216a779d6f14ae49d1e68ad5e9e505bc0387d4c528a6f0038
Oct 06 21:35:46 crc kubenswrapper[5014]: I1006 21:35:46.206194 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6h862"]
Oct 06 21:35:47 crc kubenswrapper[5014]: I1006 21:35:47.147742 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b2m7z" event={"ID":"85f140b3-dbc6-495e-b10a-fff600c38b58","Type":"ContainerStarted","Data":"7444f20081d1ca34d44f617de7534749c9529ec886533d8f195a2a0492acb46a"}
Oct 06 21:35:47 crc kubenswrapper[5014]: I1006 21:35:47.149460 5014 generic.go:334] "Generic (PLEG): container finished" podID="41881b03-6274-4b6b-a652-32bfd7ea5be3" containerID="782636e822d692dceb133c62273093d59f357ca46af4eec242036acff46a24ea" exitCode=0
Oct 06 21:35:47 crc kubenswrapper[5014]: I1006 21:35:47.150611 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4kg57" event={"ID":"41881b03-6274-4b6b-a652-32bfd7ea5be3","Type":"ContainerDied","Data":"782636e822d692dceb133c62273093d59f357ca46af4eec242036acff46a24ea"}
Oct 06 21:35:47 crc kubenswrapper[5014]: I1006 21:35:47.150719 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4kg57" event={"ID":"41881b03-6274-4b6b-a652-32bfd7ea5be3","Type":"ContainerStarted","Data":"90df76fc9a4b4617cdb9a1b84150a36b209a51ad968b3d86d6cbe5521c3d3533"}
Oct 06 21:35:47 crc kubenswrapper[5014]: I1006 21:35:47.164466 5014 generic.go:334] "Generic (PLEG): container finished" podID="e2d1f2da-6708-4d69-ac51-4789097a2ae3" containerID="9be3be002c20b7f6e9c2770aeca9d610ef9f69768707bb8d556968a73d7b84e9" exitCode=0
Oct 06 21:35:47 crc kubenswrapper[5014]: I1006 21:35:47.164558 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6h862" event={"ID":"e2d1f2da-6708-4d69-ac51-4789097a2ae3","Type":"ContainerDied","Data":"9be3be002c20b7f6e9c2770aeca9d610ef9f69768707bb8d556968a73d7b84e9"}
Oct 06 21:35:47 crc kubenswrapper[5014]: I1006 21:35:47.164657 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6h862" event={"ID":"e2d1f2da-6708-4d69-ac51-4789097a2ae3","Type":"ContainerStarted","Data":"d1f68bd8b8c5749216a779d6f14ae49d1e68ad5e9e505bc0387d4c528a6f0038"}
Oct 06 21:35:47 crc kubenswrapper[5014]: I1006 21:35:47.170868 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-b2m7z" podStartSLOduration=1.653272722 podStartE2EDuration="4.170850242s" podCreationTimestamp="2025-10-06 21:35:43 +0000 UTC" firstStartedPulling="2025-10-06 21:35:44.115901388 +0000 UTC m=+289.408938132" lastFinishedPulling="2025-10-06 21:35:46.633478918 +0000 UTC m=+291.926515652" observedRunningTime="2025-10-06 21:35:47.168071647 +0000 UTC m=+292.461108401" watchObservedRunningTime="2025-10-06 21:35:47.170850242 +0000 UTC m=+292.463886976"
Oct 06 21:35:50 crc kubenswrapper[5014]: I1006 21:35:50.185560 5014 generic.go:334] "Generic (PLEG): container finished" podID="41881b03-6274-4b6b-a652-32bfd7ea5be3" containerID="e5a7226167335f9d61dbc99cae357a0006192140f62b39ab43dd50e206a6586f" exitCode=0
Oct 06 21:35:50 crc kubenswrapper[5014]: I1006 21:35:50.185688 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4kg57" event={"ID":"41881b03-6274-4b6b-a652-32bfd7ea5be3","Type":"ContainerDied","Data":"e5a7226167335f9d61dbc99cae357a0006192140f62b39ab43dd50e206a6586f"}
Oct 06 21:35:50 crc kubenswrapper[5014]: I1006 21:35:50.192693 5014 generic.go:334] "Generic (PLEG): container finished" podID="e2d1f2da-6708-4d69-ac51-4789097a2ae3" containerID="2b75b50085b986b302df61fe12658b73b45ec8937f74912885b82f92b01a1e1f" exitCode=0
Oct 06 21:35:50 crc kubenswrapper[5014]: I1006 21:35:50.192743 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6h862" event={"ID":"e2d1f2da-6708-4d69-ac51-4789097a2ae3","Type":"ContainerDied","Data":"2b75b50085b986b302df61fe12658b73b45ec8937f74912885b82f92b01a1e1f"}
Oct 06 21:35:51 crc kubenswrapper[5014]: I1006 21:35:51.198504 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4kg57" event={"ID":"41881b03-6274-4b6b-a652-32bfd7ea5be3","Type":"ContainerStarted","Data":"1503c320b625c19ff6324b843b2afc14a9d82ce59d1dc206c67448afb80ec86c"}
Oct 06 21:35:51 crc kubenswrapper[5014]: I1006 21:35:51.200824 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6h862" event={"ID":"e2d1f2da-6708-4d69-ac51-4789097a2ae3","Type":"ContainerStarted","Data":"f2cdaf73ab48ebaebbe0d8b141b7957bf8a4a5a14a3b05147a13e979f6061263"}
Oct 06 21:35:51 crc kubenswrapper[5014]: I1006 21:35:51.292079 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6h862" podStartSLOduration=2.85720353 podStartE2EDuration="6.292048493s" podCreationTimestamp="2025-10-06 21:35:45 +0000 UTC" firstStartedPulling="2025-10-06 21:35:47.175892483 +0000 UTC m=+292.468929217" lastFinishedPulling="2025-10-06 21:35:50.610737446 +0000 UTC m=+295.903774180" observedRunningTime="2025-10-06 21:35:51.289633611 +0000 UTC m=+296.582670345" watchObservedRunningTime="2025-10-06 21:35:51.292048493 +0000 UTC m=+296.585085227"
Oct 06 21:35:51 crc kubenswrapper[5014]: I1006 21:35:51.293764 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4kg57" podStartSLOduration=2.790295156 podStartE2EDuration="6.293755231s" podCreationTimestamp="2025-10-06 21:35:45 +0000 UTC" firstStartedPulling="2025-10-06 21:35:47.153141897 +0000 UTC m=+292.446178671" lastFinishedPulling="2025-10-06 21:35:50.656602012 +0000 UTC m=+295.949638746" observedRunningTime="2025-10-06 21:35:51.264254535 +0000 UTC m=+296.557291279" watchObservedRunningTime="2025-10-06 21:35:51.293755231 +0000 UTC m=+296.586791965"
Oct 06 21:35:53 crc kubenswrapper[5014]: I1006 21:35:53.429332 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-krdbw"
Oct 06 21:35:53 crc kubenswrapper[5014]: I1006 21:35:53.430123 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-krdbw"
Oct 06 21:35:53 crc kubenswrapper[5014]: I1006 21:35:53.495163 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-krdbw"
Oct 06 21:35:53 crc kubenswrapper[5014]: I1006 21:35:53.596341 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-b2m7z"
Oct 06 21:35:53 crc kubenswrapper[5014]: I1006 21:35:53.596771 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-b2m7z"
Oct 06 21:35:53 crc kubenswrapper[5014]: I1006 21:35:53.669779 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-b2m7z"
Oct 06 21:35:54 crc kubenswrapper[5014]: I1006 21:35:54.270102 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-b2m7z"
Oct 06 21:35:54 crc kubenswrapper[5014]: I1006 21:35:54.290276 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-krdbw"
Oct 06 21:35:55 crc kubenswrapper[5014]: I1006 21:35:55.800264 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6h862"
Oct 06 21:35:55 crc kubenswrapper[5014]: I1006 21:35:55.801409 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6h862"
Oct 06 21:35:55 crc kubenswrapper[5014]: I1006 21:35:55.866849 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6h862"
Oct 06 21:35:56 crc kubenswrapper[5014]: I1006 21:35:56.003528 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4kg57"
Oct 06 21:35:56 crc kubenswrapper[5014]: I1006 21:35:56.003605 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4kg57"
Oct 06 21:35:56 crc kubenswrapper[5014]: I1006 21:35:56.063083 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4kg57"
Oct 06 21:35:56 crc kubenswrapper[5014]: I1006 21:35:56.281279 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6h862"
Oct 06 21:35:56 crc kubenswrapper[5014]: I1006 21:35:56.307729 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4kg57"
Oct 06 21:36:21 crc kubenswrapper[5014]: I1006 21:36:21.735797 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 21:36:21 crc kubenswrapper[5014]: I1006 21:36:21.736534 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 21:36:51 crc kubenswrapper[5014]: I1006 21:36:51.735800 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 21:36:51 crc kubenswrapper[5014]: I1006 21:36:51.736697 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 21:37:21 crc kubenswrapper[5014]: I1006 21:37:21.734999 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 21:37:21 crc kubenswrapper[5014]: I1006 21:37:21.735749 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 21:37:21 crc kubenswrapper[5014]: I1006 21:37:21.735820 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths"
Oct 06 21:37:21 crc kubenswrapper[5014]: I1006 21:37:21.736821 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4ae7b585fe6c43eb13afc90190430d2c1a49289fda17e2790f4be6b28606bf93"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 06 21:37:21 crc kubenswrapper[5014]: I1006 21:37:21.736949 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://4ae7b585fe6c43eb13afc90190430d2c1a49289fda17e2790f4be6b28606bf93" gracePeriod=600
Oct 06 21:37:22 crc kubenswrapper[5014]: I1006 21:37:22.860314 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="4ae7b585fe6c43eb13afc90190430d2c1a49289fda17e2790f4be6b28606bf93" exitCode=0
Oct 06 21:37:22 crc kubenswrapper[5014]: I1006 21:37:22.860395 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"4ae7b585fe6c43eb13afc90190430d2c1a49289fda17e2790f4be6b28606bf93"}
Oct 06 21:37:22 crc kubenswrapper[5014]: I1006 21:37:22.861096 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"2875d948827114f05dc24f63c808bf8cba2bc805a548fae859881eca3487971f"}
Oct 06 21:37:22 crc kubenswrapper[5014]: I1006 21:37:22.861146 5014 scope.go:117] "RemoveContainer" containerID="c6652c5c5096c8b187b04042b23178ab0e780dcb3495b96b03ceeace7373d20c"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.777425 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-jh87z"]
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.778800 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.791523 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-jh87z"]
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.857094 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5b185189-d2c7-4783-b22b-54076c321400-bound-sa-token\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.857163 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mtg8\" (UniqueName: \"kubernetes.io/projected/5b185189-d2c7-4783-b22b-54076c321400-kube-api-access-7mtg8\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.857211 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.857229 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5b185189-d2c7-4783-b22b-54076c321400-ca-trust-extracted\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.857257 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5b185189-d2c7-4783-b22b-54076c321400-registry-tls\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.857301 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5b185189-d2c7-4783-b22b-54076c321400-trusted-ca\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.857327 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5b185189-d2c7-4783-b22b-54076c321400-installation-pull-secrets\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.857345 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5b185189-d2c7-4783-b22b-54076c321400-registry-certificates\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.877462 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.958532 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5b185189-d2c7-4783-b22b-54076c321400-bound-sa-token\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.958592 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mtg8\" (UniqueName: \"kubernetes.io/projected/5b185189-d2c7-4783-b22b-54076c321400-kube-api-access-7mtg8\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.958658 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5b185189-d2c7-4783-b22b-54076c321400-ca-trust-extracted\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.958689 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5b185189-d2c7-4783-b22b-54076c321400-registry-tls\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.958708 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5b185189-d2c7-4783-b22b-54076c321400-trusted-ca\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.958741 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5b185189-d2c7-4783-b22b-54076c321400-installation-pull-secrets\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.958763 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5b185189-d2c7-4783-b22b-54076c321400-registry-certificates\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.960031 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5b185189-d2c7-4783-b22b-54076c321400-trusted-ca\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.960128 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/5b185189-d2c7-4783-b22b-54076c321400-registry-certificates\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.960357 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/5b185189-d2c7-4783-b22b-54076c321400-ca-trust-extracted\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.965985 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/5b185189-d2c7-4783-b22b-54076c321400-registry-tls\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.966057 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/5b185189-d2c7-4783-b22b-54076c321400-installation-pull-secrets\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.977014 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5b185189-d2c7-4783-b22b-54076c321400-bound-sa-token\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:47 crc kubenswrapper[5014]: I1006 21:38:47.978834 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mtg8\" (UniqueName: \"kubernetes.io/projected/5b185189-d2c7-4783-b22b-54076c321400-kube-api-access-7mtg8\") pod \"image-registry-66df7c8f76-jh87z\" (UID: \"5b185189-d2c7-4783-b22b-54076c321400\") " pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:48 crc kubenswrapper[5014]: I1006 21:38:48.097692 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:48 crc kubenswrapper[5014]: I1006 21:38:48.334684 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-jh87z"]
Oct 06 21:38:48 crc kubenswrapper[5014]: I1006 21:38:48.494011 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-jh87z" event={"ID":"5b185189-d2c7-4783-b22b-54076c321400","Type":"ContainerStarted","Data":"2b60ff6315d4b2bff328e6021bdbf733643199feb438b91ae658c3110fc9dc71"}
Oct 06 21:38:49 crc kubenswrapper[5014]: I1006 21:38:49.502590 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-jh87z" event={"ID":"5b185189-d2c7-4783-b22b-54076c321400","Type":"ContainerStarted","Data":"b7c7c6d70ba0c73c5f25e20ab5eb7c38cdef20f289afe60a45d533d563d3f4e9"}
Oct 06 21:38:49 crc kubenswrapper[5014]: I1006 21:38:49.503778 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:38:49 crc kubenswrapper[5014]: I1006 21:38:49.538226 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-jh87z" podStartSLOduration=2.538196724 podStartE2EDuration="2.538196724s" podCreationTimestamp="2025-10-06 21:38:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:38:49.533932777 +0000 UTC m=+474.826969551" watchObservedRunningTime="2025-10-06 21:38:49.538196724 +0000 UTC m=+474.831233498"
Oct 06 21:39:08 crc kubenswrapper[5014]: I1006 21:39:08.109234 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-jh87z"
Oct 06 21:39:08 crc kubenswrapper[5014]: I1006 21:39:08.176964 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-297xp"]
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.236994 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-297xp" podUID="3693c72b-1c6a-4362-bda2-6d5ea365cd38" containerName="registry" containerID="cri-o://b3c9ab86406d8447ab28f7bbb8394d5351290adec3fb30215d866f8dcf4baea6" gracePeriod=30
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.703095 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.812110 5014 generic.go:334] "Generic (PLEG): container finished" podID="3693c72b-1c6a-4362-bda2-6d5ea365cd38" containerID="b3c9ab86406d8447ab28f7bbb8394d5351290adec3fb30215d866f8dcf4baea6" exitCode=0
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.812162 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-297xp" event={"ID":"3693c72b-1c6a-4362-bda2-6d5ea365cd38","Type":"ContainerDied","Data":"b3c9ab86406d8447ab28f7bbb8394d5351290adec3fb30215d866f8dcf4baea6"}
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.812201 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-297xp" event={"ID":"3693c72b-1c6a-4362-bda2-6d5ea365cd38","Type":"ContainerDied","Data":"8a39b81de1a634092fda18f8d73d6824e1c7af3670894d6e5bc08b26f4f135f7"}
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.812202 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-297xp"
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.812227 5014 scope.go:117] "RemoveContainer" containerID="b3c9ab86406d8447ab28f7bbb8394d5351290adec3fb30215d866f8dcf4baea6"
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.834495 5014 scope.go:117] "RemoveContainer" containerID="b3c9ab86406d8447ab28f7bbb8394d5351290adec3fb30215d866f8dcf4baea6"
Oct 06 21:39:33 crc kubenswrapper[5014]: E1006 21:39:33.835028 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3c9ab86406d8447ab28f7bbb8394d5351290adec3fb30215d866f8dcf4baea6\": container with ID starting with b3c9ab86406d8447ab28f7bbb8394d5351290adec3fb30215d866f8dcf4baea6 not found: ID does not exist" containerID="b3c9ab86406d8447ab28f7bbb8394d5351290adec3fb30215d866f8dcf4baea6"
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.835087 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3c9ab86406d8447ab28f7bbb8394d5351290adec3fb30215d866f8dcf4baea6"} err="failed to get container status \"b3c9ab86406d8447ab28f7bbb8394d5351290adec3fb30215d866f8dcf4baea6\": rpc error: code = NotFound desc = could not find container \"b3c9ab86406d8447ab28f7bbb8394d5351290adec3fb30215d866f8dcf4baea6\": container with ID starting with b3c9ab86406d8447ab28f7bbb8394d5351290adec3fb30215d866f8dcf4baea6 not found: ID does not exist"
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.890970 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3693c72b-1c6a-4362-bda2-6d5ea365cd38-trusted-ca\") pod \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") "
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.891052 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3693c72b-1c6a-4362-bda2-6d5ea365cd38-registry-certificates\") pod \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") "
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.891139 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-registry-tls\") pod \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") "
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.891204 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-bound-sa-token\") pod \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") "
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.891272 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3693c72b-1c6a-4362-bda2-6d5ea365cd38-ca-trust-extracted\") pod \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") "
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.891324 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3693c72b-1c6a-4362-bda2-6d5ea365cd38-installation-pull-secrets\") pod \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") "
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.891376 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2qpv\" (UniqueName: \"kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-kube-api-access-q2qpv\") pod \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") "
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.891605 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\" (UID: \"3693c72b-1c6a-4362-bda2-6d5ea365cd38\") "
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.893332 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3693c72b-1c6a-4362-bda2-6d5ea365cd38-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "3693c72b-1c6a-4362-bda2-6d5ea365cd38" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.893644 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3693c72b-1c6a-4362-bda2-6d5ea365cd38-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "3693c72b-1c6a-4362-bda2-6d5ea365cd38" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.901353 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3693c72b-1c6a-4362-bda2-6d5ea365cd38-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "3693c72b-1c6a-4362-bda2-6d5ea365cd38" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.902801 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "3693c72b-1c6a-4362-bda2-6d5ea365cd38" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.903027 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-kube-api-access-q2qpv" (OuterVolumeSpecName: "kube-api-access-q2qpv") pod "3693c72b-1c6a-4362-bda2-6d5ea365cd38" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38"). InnerVolumeSpecName "kube-api-access-q2qpv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.903914 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "3693c72b-1c6a-4362-bda2-6d5ea365cd38" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.907166 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3693c72b-1c6a-4362-bda2-6d5ea365cd38-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "3693c72b-1c6a-4362-bda2-6d5ea365cd38" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.915068 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "3693c72b-1c6a-4362-bda2-6d5ea365cd38" (UID: "3693c72b-1c6a-4362-bda2-6d5ea365cd38"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.992985 5014 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3693c72b-1c6a-4362-bda2-6d5ea365cd38-trusted-ca\") on node \"crc\" DevicePath \"\""
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.993287 5014 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3693c72b-1c6a-4362-bda2-6d5ea365cd38-registry-certificates\") on node \"crc\" DevicePath \"\""
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.993373 5014 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-registry-tls\") on node \"crc\" DevicePath \"\""
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.993442 5014 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-bound-sa-token\") on node \"crc\" DevicePath \"\""
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.993507 5014 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3693c72b-1c6a-4362-bda2-6d5ea365cd38-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.993572 5014 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3693c72b-1c6a-4362-bda2-6d5ea365cd38-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Oct 06 21:39:33 crc kubenswrapper[5014]: I1006 21:39:33.993685 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2qpv\" (UniqueName: \"kubernetes.io/projected/3693c72b-1c6a-4362-bda2-6d5ea365cd38-kube-api-access-q2qpv\") on node \"crc\" DevicePath \"\""
Oct 06 21:39:34 crc kubenswrapper[5014]: I1006 21:39:34.165656 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-297xp"]
Oct 06 21:39:34 crc kubenswrapper[5014]: I1006 21:39:34.171175 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-297xp"]
Oct 06 21:39:35 crc kubenswrapper[5014]: I1006 21:39:35.506189 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3693c72b-1c6a-4362-bda2-6d5ea365cd38" path="/var/lib/kubelet/pods/3693c72b-1c6a-4362-bda2-6d5ea365cd38/volumes"
Oct 06 21:39:51 crc kubenswrapper[5014]: I1006 21:39:51.735026 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 21:39:51 crc kubenswrapper[5014]: I1006 21:39:51.735654 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 21:40:21 crc kubenswrapper[5014]: I1006 21:40:21.735100 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 21:40:21 crc kubenswrapper[5014]: I1006 21:40:21.735763 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 21:40:51 crc kubenswrapper[5014]: I1006 21:40:51.735966 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 21:40:51 crc kubenswrapper[5014]: I1006 21:40:51.736726 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 21:40:51 crc kubenswrapper[5014]: I1006 21:40:51.736789 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths"
Oct 06 21:40:51 crc kubenswrapper[5014]: I1006 21:40:51.737554 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2875d948827114f05dc24f63c808bf8cba2bc805a548fae859881eca3487971f"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 06 21:40:51 crc kubenswrapper[5014]: I1006 21:40:51.737639 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://2875d948827114f05dc24f63c808bf8cba2bc805a548fae859881eca3487971f" gracePeriod=600
Oct 06 21:40:52 crc kubenswrapper[5014]: I1006 21:40:52.347856 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="2875d948827114f05dc24f63c808bf8cba2bc805a548fae859881eca3487971f" exitCode=0
Oct 06 21:40:52 crc kubenswrapper[5014]: I1006 21:40:52.347976 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"2875d948827114f05dc24f63c808bf8cba2bc805a548fae859881eca3487971f"}
Oct 06 21:40:52 crc kubenswrapper[5014]: I1006 21:40:52.348506 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"5d102177c0ce6793970da1b086882b162bbee28f8415589d9583b1584188a7df"}
Oct 06 21:40:52 crc kubenswrapper[5014]: I1006 21:40:52.348559 5014 scope.go:117] "RemoveContainer" containerID="4ae7b585fe6c43eb13afc90190430d2c1a49289fda17e2790f4be6b28606bf93"
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.195911 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-2wj75"]
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.197472 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovn-controller" containerID="cri-o://51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b" gracePeriod=30
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.197579 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="nbdb" containerID="cri-o://1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055" gracePeriod=30
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.197667 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b" gracePeriod=30
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.197720 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovn-acl-logging" containerID="cri-o://60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3" gracePeriod=30
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.197713 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="kube-rbac-proxy-node" containerID="cri-o://54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0" gracePeriod=30
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.197805 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="sbdb" containerID="cri-o://087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e" gracePeriod=30
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.197921 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="northd" containerID="cri-o://9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3" gracePeriod=30
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.244998 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" containerID="cri-o://72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f" gracePeriod=30
Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.479139 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055 is running failed: container process not found" containerID="1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"]
Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.479155 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e is running failed: container process not found" containerID="087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"]
Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.479789 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055 is running failed: container process not found" containerID="1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"]
Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.479826 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e is running failed: container process not found" containerID="087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"]
Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.480150 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e is running failed: container process not found" containerID="087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"]
Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.480175 5014 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="sbdb"
Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.480324 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055 is running failed: container process not found" containerID="1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"]
Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.480349 5014 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055 is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="nbdb"
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.560119 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/3.log"
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.568907 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovn-acl-logging/0.log"
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.569877 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovn-controller/0.log"
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.570458 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75"
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638069 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-tzlzg"]
Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.638297 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="kube-rbac-proxy-node"
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638312 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="kube-rbac-proxy-node"
Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.638324 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="sbdb"
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638334 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="sbdb"
Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.638347 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="northd"
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638355 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="northd"
Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.638366 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovn-acl-logging"
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638372 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovn-acl-logging"
Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.638387 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="kubecfg-setup"
Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638394 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="kubecfg-setup"
Oct 06 21:42:16 crc 
kubenswrapper[5014]: E1006 21:42:16.638405 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="nbdb" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638413 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="nbdb" Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.638424 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638432 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.638444 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638451 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.638459 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638467 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.638482 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="kube-rbac-proxy-ovn-metrics" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638489 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="kube-rbac-proxy-ovn-metrics" Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.638497 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3693c72b-1c6a-4362-bda2-6d5ea365cd38" containerName="registry" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638505 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="3693c72b-1c6a-4362-bda2-6d5ea365cd38" containerName="registry" Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.638516 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638524 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.638532 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovn-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638539 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovn-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638676 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638690 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" 
containerName="nbdb" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638698 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638706 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638713 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovn-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638723 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="kube-rbac-proxy-node" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638731 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="northd" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638739 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovn-acl-logging" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638747 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638754 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="3693c72b-1c6a-4362-bda2-6d5ea365cd38" containerName="registry" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638763 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="sbdb" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638772 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="kube-rbac-proxy-ovn-metrics" Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.638877 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.638886 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.639001 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerName="ovnkube-controller" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.640915 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761025 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-openvswitch\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761108 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-etc-openvswitch\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761115 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761139 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-ovn\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761193 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-cni-bin\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761236 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-run-netns\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761231 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761279 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5d2de4ac-a423-4f5a-904a-817553f204f6-ovn-node-metrics-cert\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761281 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761250 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761326 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761335 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-cni-netd\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761358 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761373 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-ovnkube-config\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761410 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-var-lib-openvswitch\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761460 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-ovnkube-script-lib\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761493 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-kubelet\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761536 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761544 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gcbj4\" (UniqueName: \"kubernetes.io/projected/5d2de4ac-a423-4f5a-904a-817553f204f6-kube-api-access-gcbj4\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761610 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-run-ovn-kubernetes\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761655 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-systemd-units\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761679 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-env-overrides\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761730 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761750 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-systemd\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761765 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-log-socket\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761790 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-slash\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.761819 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-node-log\") pod \"5d2de4ac-a423-4f5a-904a-817553f204f6\" (UID: \"5d2de4ac-a423-4f5a-904a-817553f204f6\") " Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762018 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frbcb\" (UniqueName: 
\"kubernetes.io/projected/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-kube-api-access-frbcb\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762047 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-run-systemd\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762071 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-log-socket\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762092 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-kubelet\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762151 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-run-netns\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762179 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-run-ovn-kubernetes\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762239 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-etc-openvswitch\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762260 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-env-overrides\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762286 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-run-ovn\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762303 5014 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-ovnkube-script-lib\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762325 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-run-openvswitch\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762342 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-slash\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762350 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762380 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-systemd-units\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762451 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-cni-netd\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762537 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762582 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "systemd-units". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762587 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762644 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-slash" (OuterVolumeSpecName: "host-slash") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762670 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762958 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762989 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-node-log" (OuterVolumeSpecName: "node-log") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.762994 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763070 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763447 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-ovn-node-metrics-cert\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763482 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-cni-bin\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763500 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-var-lib-openvswitch\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763532 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-ovnkube-config\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763583 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-node-log\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763679 5014 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763692 5014 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-ovn\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763705 5014 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-cni-bin\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763717 5014 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-run-netns\") 
on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763728 5014 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-cni-netd\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763740 5014 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-ovnkube-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763751 5014 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763762 5014 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763773 5014 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-kubelet\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763785 5014 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763796 5014 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-systemd-units\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763806 5014 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5d2de4ac-a423-4f5a-904a-817553f204f6-env-overrides\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763818 5014 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763830 5014 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-host-slash\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763842 5014 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-node-log\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763852 5014 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-openvswitch\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.763917 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-log-socket" (OuterVolumeSpecName: 
"log-socket") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.772710 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d2de4ac-a423-4f5a-904a-817553f204f6-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.772716 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d2de4ac-a423-4f5a-904a-817553f204f6-kube-api-access-gcbj4" (OuterVolumeSpecName: "kube-api-access-gcbj4") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "kube-api-access-gcbj4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.788045 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "5d2de4ac-a423-4f5a-904a-817553f204f6" (UID: "5d2de4ac-a423-4f5a-904a-817553f204f6"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.865478 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-etc-openvswitch\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.865556 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-etc-openvswitch\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.865567 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-env-overrides\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.865683 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-run-openvswitch\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.865717 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-run-ovn\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.865737 5014 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-ovnkube-script-lib\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.865766 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-slash\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.865823 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-systemd-units\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.865929 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-cni-netd\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.865972 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-run-ovn\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866046 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866039 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-systemd-units\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.865996 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866086 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-slash\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866064 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-run-openvswitch\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866226 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-cni-netd\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866335 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-ovn-node-metrics-cert\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866424 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-cni-bin\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866458 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-var-lib-openvswitch\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866493 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-ovnkube-config\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866543 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-cni-bin\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866555 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-node-log\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866645 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-node-log\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866668 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-env-overrides\") pod \"ovnkube-node-tzlzg\" 
(UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866708 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-var-lib-openvswitch\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866735 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frbcb\" (UniqueName: \"kubernetes.io/projected/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-kube-api-access-frbcb\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866903 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-run-systemd\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.866964 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-log-socket\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.867013 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-kubelet\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.867022 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-run-systemd\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.867026 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-ovnkube-script-lib\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.867079 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-log-socket\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.867246 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-ovnkube-config\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc 
kubenswrapper[5014]: I1006 21:42:16.867515 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-kubelet\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.867667 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-run-netns\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.867673 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-run-netns\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.867740 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-run-ovn-kubernetes\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.867834 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-host-run-ovn-kubernetes\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.867937 5014 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5d2de4ac-a423-4f5a-904a-817553f204f6-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.867964 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gcbj4\" (UniqueName: \"kubernetes.io/projected/5d2de4ac-a423-4f5a-904a-817553f204f6-kube-api-access-gcbj4\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.867985 5014 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-run-systemd\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.868003 5014 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5d2de4ac-a423-4f5a-904a-817553f204f6-log-socket\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.871690 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-ovn-node-metrics-cert\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.897785 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frbcb\" (UniqueName: 
\"kubernetes.io/projected/c044ee58-b4d9-4f49-b1c6-f9efab2b0383-kube-api-access-frbcb\") pod \"ovnkube-node-tzlzg\" (UID: \"c044ee58-b4d9-4f49-b1c6-f9efab2b0383\") " pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.926265 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8ddbf_9f1464a5-d713-4f79-8248-33c69abcdac2/kube-multus/2.log" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.926994 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8ddbf_9f1464a5-d713-4f79-8248-33c69abcdac2/kube-multus/1.log" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.927036 5014 generic.go:334] "Generic (PLEG): container finished" podID="9f1464a5-d713-4f79-8248-33c69abcdac2" containerID="dbd4cf9bbd8472079722bf34bcdf563ce5e2cf12258cf52892b40da6cea24571" exitCode=2 Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.927116 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8ddbf" event={"ID":"9f1464a5-d713-4f79-8248-33c69abcdac2","Type":"ContainerDied","Data":"dbd4cf9bbd8472079722bf34bcdf563ce5e2cf12258cf52892b40da6cea24571"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.927198 5014 scope.go:117] "RemoveContainer" containerID="c0cf1ba6616443cb4de3d3036bcacb1b1672bcd98fc50e2162c40b5fdfcb7583" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.928022 5014 scope.go:117] "RemoveContainer" containerID="dbd4cf9bbd8472079722bf34bcdf563ce5e2cf12258cf52892b40da6cea24571" Oct 06 21:42:16 crc kubenswrapper[5014]: E1006 21:42:16.928339 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-8ddbf_openshift-multus(9f1464a5-d713-4f79-8248-33c69abcdac2)\"" pod="openshift-multus/multus-8ddbf" podUID="9f1464a5-d713-4f79-8248-33c69abcdac2" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.932074 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovnkube-controller/3.log" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.935890 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovn-acl-logging/0.log" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.936457 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2wj75_5d2de4ac-a423-4f5a-904a-817553f204f6/ovn-controller/0.log" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.936968 5014 generic.go:334] "Generic (PLEG): container finished" podID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerID="72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f" exitCode=0 Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937050 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerDied","Data":"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937087 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerDied","Data":"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e"} Oct 
06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937053 5014 generic.go:334] "Generic (PLEG): container finished" podID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerID="087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e" exitCode=0 Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937103 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937127 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerDied","Data":"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937117 5014 generic.go:334] "Generic (PLEG): container finished" podID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerID="1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055" exitCode=0 Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937217 5014 generic.go:334] "Generic (PLEG): container finished" podID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerID="9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3" exitCode=0 Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937228 5014 generic.go:334] "Generic (PLEG): container finished" podID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerID="e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b" exitCode=0 Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937237 5014 generic.go:334] "Generic (PLEG): container finished" podID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerID="54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0" exitCode=0 Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937246 5014 generic.go:334] "Generic (PLEG): container finished" podID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerID="60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3" exitCode=143 Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937255 5014 generic.go:334] "Generic (PLEG): container finished" podID="5d2de4ac-a423-4f5a-904a-817553f204f6" containerID="51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b" exitCode=143 Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937271 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerDied","Data":"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937285 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerDied","Data":"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937298 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerDied","Data":"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937311 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 
21:42:16.937323 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937330 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937338 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937344 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937351 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937357 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937364 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937370 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937376 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937387 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerDied","Data":"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937397 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937406 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937414 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937422 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 
21:42:16.937430 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937437 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937445 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937452 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937459 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937468 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937477 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerDied","Data":"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937487 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937495 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937502 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937509 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937515 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937522 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937529 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 
21:42:16.937535 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937542 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937549 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937557 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2wj75" event={"ID":"5d2de4ac-a423-4f5a-904a-817553f204f6","Type":"ContainerDied","Data":"cbb0bac5c2e6aaec1523e691ea7627fc73d4a57d2fcce6fc9f8a39726825a775"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937567 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937575 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937582 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937589 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937599 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937606 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937627 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937635 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937642 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.937649 5014 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114"} Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 
21:42:16.972057 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.978878 5014 scope.go:117] "RemoveContainer" containerID="72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f" Oct 06 21:42:16 crc kubenswrapper[5014]: I1006 21:42:16.999156 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-2wj75"] Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.005601 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-2wj75"] Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.017396 5014 scope.go:117] "RemoveContainer" containerID="f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.050182 5014 scope.go:117] "RemoveContainer" containerID="087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.079529 5014 scope.go:117] "RemoveContainer" containerID="1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.098582 5014 scope.go:117] "RemoveContainer" containerID="9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.125480 5014 scope.go:117] "RemoveContainer" containerID="e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.153236 5014 scope.go:117] "RemoveContainer" containerID="54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.169831 5014 scope.go:117] "RemoveContainer" containerID="60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.208327 5014 scope.go:117] "RemoveContainer" containerID="51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.283580 5014 scope.go:117] "RemoveContainer" containerID="7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.302190 5014 scope.go:117] "RemoveContainer" containerID="72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f" Oct 06 21:42:17 crc kubenswrapper[5014]: E1006 21:42:17.302730 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f\": container with ID starting with 72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f not found: ID does not exist" containerID="72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.302781 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f"} err="failed to get container status \"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f\": rpc error: code = NotFound desc = could not find container \"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f\": container with ID starting with 72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 
21:42:17.302810 5014 scope.go:117] "RemoveContainer" containerID="f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828" Oct 06 21:42:17 crc kubenswrapper[5014]: E1006 21:42:17.303184 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828\": container with ID starting with f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828 not found: ID does not exist" containerID="f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.303233 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828"} err="failed to get container status \"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828\": rpc error: code = NotFound desc = could not find container \"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828\": container with ID starting with f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.303273 5014 scope.go:117] "RemoveContainer" containerID="087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e" Oct 06 21:42:17 crc kubenswrapper[5014]: E1006 21:42:17.303753 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\": container with ID starting with 087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e not found: ID does not exist" containerID="087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.303785 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e"} err="failed to get container status \"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\": rpc error: code = NotFound desc = could not find container \"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\": container with ID starting with 087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.303811 5014 scope.go:117] "RemoveContainer" containerID="1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055" Oct 06 21:42:17 crc kubenswrapper[5014]: E1006 21:42:17.305074 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\": container with ID starting with 1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055 not found: ID does not exist" containerID="1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.305126 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055"} err="failed to get container status \"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\": rpc error: code = NotFound desc = could not find container \"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\": container with ID 
starting with 1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.305159 5014 scope.go:117] "RemoveContainer" containerID="9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3" Oct 06 21:42:17 crc kubenswrapper[5014]: E1006 21:42:17.305576 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\": container with ID starting with 9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3 not found: ID does not exist" containerID="9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.305612 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3"} err="failed to get container status \"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\": rpc error: code = NotFound desc = could not find container \"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\": container with ID starting with 9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.305662 5014 scope.go:117] "RemoveContainer" containerID="e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b" Oct 06 21:42:17 crc kubenswrapper[5014]: E1006 21:42:17.306111 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\": container with ID starting with e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b not found: ID does not exist" containerID="e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.306140 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b"} err="failed to get container status \"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\": rpc error: code = NotFound desc = could not find container \"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\": container with ID starting with e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.306162 5014 scope.go:117] "RemoveContainer" containerID="54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0" Oct 06 21:42:17 crc kubenswrapper[5014]: E1006 21:42:17.306570 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\": container with ID starting with 54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0 not found: ID does not exist" containerID="54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.306652 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0"} err="failed to get container status 
\"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\": rpc error: code = NotFound desc = could not find container \"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\": container with ID starting with 54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.306682 5014 scope.go:117] "RemoveContainer" containerID="60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3" Oct 06 21:42:17 crc kubenswrapper[5014]: E1006 21:42:17.307014 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\": container with ID starting with 60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3 not found: ID does not exist" containerID="60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.307057 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3"} err="failed to get container status \"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\": rpc error: code = NotFound desc = could not find container \"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\": container with ID starting with 60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.307078 5014 scope.go:117] "RemoveContainer" containerID="51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b" Oct 06 21:42:17 crc kubenswrapper[5014]: E1006 21:42:17.307393 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\": container with ID starting with 51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b not found: ID does not exist" containerID="51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.307435 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b"} err="failed to get container status \"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\": rpc error: code = NotFound desc = could not find container \"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\": container with ID starting with 51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.307463 5014 scope.go:117] "RemoveContainer" containerID="7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114" Oct 06 21:42:17 crc kubenswrapper[5014]: E1006 21:42:17.307825 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\": container with ID starting with 7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114 not found: ID does not exist" containerID="7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.307858 5014 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114"} err="failed to get container status \"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\": rpc error: code = NotFound desc = could not find container \"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\": container with ID starting with 7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.307874 5014 scope.go:117] "RemoveContainer" containerID="72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.308167 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f"} err="failed to get container status \"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f\": rpc error: code = NotFound desc = could not find container \"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f\": container with ID starting with 72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.308193 5014 scope.go:117] "RemoveContainer" containerID="f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.308512 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828"} err="failed to get container status \"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828\": rpc error: code = NotFound desc = could not find container \"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828\": container with ID starting with f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.308551 5014 scope.go:117] "RemoveContainer" containerID="087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.308904 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e"} err="failed to get container status \"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\": rpc error: code = NotFound desc = could not find container \"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\": container with ID starting with 087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.308939 5014 scope.go:117] "RemoveContainer" containerID="1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.309438 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055"} err="failed to get container status \"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\": rpc error: code = NotFound desc = could not find container \"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\": container with ID starting with 1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055 
not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.309471 5014 scope.go:117] "RemoveContainer" containerID="9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.309787 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3"} err="failed to get container status \"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\": rpc error: code = NotFound desc = could not find container \"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\": container with ID starting with 9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.309823 5014 scope.go:117] "RemoveContainer" containerID="e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.310236 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b"} err="failed to get container status \"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\": rpc error: code = NotFound desc = could not find container \"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\": container with ID starting with e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.310266 5014 scope.go:117] "RemoveContainer" containerID="54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.310548 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0"} err="failed to get container status \"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\": rpc error: code = NotFound desc = could not find container \"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\": container with ID starting with 54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.310574 5014 scope.go:117] "RemoveContainer" containerID="60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.310897 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3"} err="failed to get container status \"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\": rpc error: code = NotFound desc = could not find container \"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\": container with ID starting with 60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.310928 5014 scope.go:117] "RemoveContainer" containerID="51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.311186 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b"} err="failed to get 
container status \"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\": rpc error: code = NotFound desc = could not find container \"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\": container with ID starting with 51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.311216 5014 scope.go:117] "RemoveContainer" containerID="7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.311678 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114"} err="failed to get container status \"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\": rpc error: code = NotFound desc = could not find container \"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\": container with ID starting with 7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.311708 5014 scope.go:117] "RemoveContainer" containerID="72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.312054 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f"} err="failed to get container status \"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f\": rpc error: code = NotFound desc = could not find container \"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f\": container with ID starting with 72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.312085 5014 scope.go:117] "RemoveContainer" containerID="f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.312406 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828"} err="failed to get container status \"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828\": rpc error: code = NotFound desc = could not find container \"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828\": container with ID starting with f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.312438 5014 scope.go:117] "RemoveContainer" containerID="087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.312836 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e"} err="failed to get container status \"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\": rpc error: code = NotFound desc = could not find container \"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\": container with ID starting with 087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.312875 5014 scope.go:117] "RemoveContainer" 
containerID="1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.313197 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055"} err="failed to get container status \"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\": rpc error: code = NotFound desc = could not find container \"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\": container with ID starting with 1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.313229 5014 scope.go:117] "RemoveContainer" containerID="9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.313600 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3"} err="failed to get container status \"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\": rpc error: code = NotFound desc = could not find container \"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\": container with ID starting with 9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.313656 5014 scope.go:117] "RemoveContainer" containerID="e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.313982 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b"} err="failed to get container status \"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\": rpc error: code = NotFound desc = could not find container \"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\": container with ID starting with e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.314011 5014 scope.go:117] "RemoveContainer" containerID="54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.314298 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0"} err="failed to get container status \"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\": rpc error: code = NotFound desc = could not find container \"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\": container with ID starting with 54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.314326 5014 scope.go:117] "RemoveContainer" containerID="60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.314701 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3"} err="failed to get container status \"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\": rpc error: code = NotFound desc = could not find 
container \"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\": container with ID starting with 60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.314737 5014 scope.go:117] "RemoveContainer" containerID="51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.315054 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b"} err="failed to get container status \"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\": rpc error: code = NotFound desc = could not find container \"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\": container with ID starting with 51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.315084 5014 scope.go:117] "RemoveContainer" containerID="7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.315452 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114"} err="failed to get container status \"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\": rpc error: code = NotFound desc = could not find container \"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\": container with ID starting with 7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.315490 5014 scope.go:117] "RemoveContainer" containerID="72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.315833 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f"} err="failed to get container status \"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f\": rpc error: code = NotFound desc = could not find container \"72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f\": container with ID starting with 72a7c4739faec8f6b282fcdee2893833cd28b218f2aa5a29f0ee94edc70e971f not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.315868 5014 scope.go:117] "RemoveContainer" containerID="f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.316376 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828"} err="failed to get container status \"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828\": rpc error: code = NotFound desc = could not find container \"f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828\": container with ID starting with f676eb2eae036fb0e05588b884aad00f4cee99e27bb39dec4dabd694f05d7828 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.316481 5014 scope.go:117] "RemoveContainer" containerID="087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.316849 5014 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e"} err="failed to get container status \"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\": rpc error: code = NotFound desc = could not find container \"087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e\": container with ID starting with 087a804971328719fd30a4ea87e4eb2325cb7771ab94072d16d919fdcd578f4e not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.316874 5014 scope.go:117] "RemoveContainer" containerID="1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.317178 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055"} err="failed to get container status \"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\": rpc error: code = NotFound desc = could not find container \"1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055\": container with ID starting with 1330623e2767d2b7cbbfd4cee756b043cb809197a8146e39f33ec1a446fcc055 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.317213 5014 scope.go:117] "RemoveContainer" containerID="9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.317532 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3"} err="failed to get container status \"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\": rpc error: code = NotFound desc = could not find container \"9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3\": container with ID starting with 9ca485fea517d0098b99919abf121b6e6838d306b3207e2b06469530c71006d3 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.317558 5014 scope.go:117] "RemoveContainer" containerID="e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.318004 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b"} err="failed to get container status \"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\": rpc error: code = NotFound desc = could not find container \"e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b\": container with ID starting with e5642201f28ea2f35f9c9cf2d82e40abb9b93544459080dcace2f50ded0c8d1b not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.318031 5014 scope.go:117] "RemoveContainer" containerID="54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.318345 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0"} err="failed to get container status \"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\": rpc error: code = NotFound desc = could not find container \"54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0\": container with ID starting with 
54698dc685f3dd8c96ad7295c591112c64eb4677aa5903817ec399da56daaac0 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.318373 5014 scope.go:117] "RemoveContainer" containerID="60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.318894 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3"} err="failed to get container status \"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\": rpc error: code = NotFound desc = could not find container \"60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3\": container with ID starting with 60180fb428d5751594acca9bd796128d940a94b0c22e9e727e32cb721f867bc3 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.318918 5014 scope.go:117] "RemoveContainer" containerID="51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.319250 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b"} err="failed to get container status \"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\": rpc error: code = NotFound desc = could not find container \"51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b\": container with ID starting with 51e9bd56f5fd0abfff166bcfb50f5e763ced87b919272947d6836e3a530e8d6b not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.319282 5014 scope.go:117] "RemoveContainer" containerID="7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.319573 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114"} err="failed to get container status \"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\": rpc error: code = NotFound desc = could not find container \"7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114\": container with ID starting with 7c55a2707074c8cfd51d093eee1bd71366695ec8e4e8f692e55e2815af1da114 not found: ID does not exist" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.493549 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d2de4ac-a423-4f5a-904a-817553f204f6" path="/var/lib/kubelet/pods/5d2de4ac-a423-4f5a-904a-817553f204f6/volumes" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.948205 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8ddbf_9f1464a5-d713-4f79-8248-33c69abcdac2/kube-multus/2.log" Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.951022 5014 generic.go:334] "Generic (PLEG): container finished" podID="c044ee58-b4d9-4f49-b1c6-f9efab2b0383" containerID="ef6e7e09161c6b5dde27f32e38598ad8ed6869bfc9ca378e505d4fcf4836d9ba" exitCode=0 Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.951086 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" event={"ID":"c044ee58-b4d9-4f49-b1c6-f9efab2b0383","Type":"ContainerDied","Data":"ef6e7e09161c6b5dde27f32e38598ad8ed6869bfc9ca378e505d4fcf4836d9ba"} Oct 06 21:42:17 crc kubenswrapper[5014]: I1006 21:42:17.951143 5014 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" event={"ID":"c044ee58-b4d9-4f49-b1c6-f9efab2b0383","Type":"ContainerStarted","Data":"81688ab57f4bfddfbad6e4b3aa5197258f1cbd4e0270fbf7b3fc9be104d592c5"} Oct 06 21:42:18 crc kubenswrapper[5014]: I1006 21:42:18.964982 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" event={"ID":"c044ee58-b4d9-4f49-b1c6-f9efab2b0383","Type":"ContainerStarted","Data":"937e5114bd17219d5179d56815162ac505bcc8327ef279caaf94309666988ec7"} Oct 06 21:42:18 crc kubenswrapper[5014]: I1006 21:42:18.965423 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" event={"ID":"c044ee58-b4d9-4f49-b1c6-f9efab2b0383","Type":"ContainerStarted","Data":"c3db8c22d6b6a71255ec0f0fd1ac5106a543366a54fef957946d91ffc7ad3b90"} Oct 06 21:42:18 crc kubenswrapper[5014]: I1006 21:42:18.965448 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" event={"ID":"c044ee58-b4d9-4f49-b1c6-f9efab2b0383","Type":"ContainerStarted","Data":"5348f1584ac38f712d82f84b610ffed6cd2566643dc5b5061e7b77ab6f17bb2f"} Oct 06 21:42:18 crc kubenswrapper[5014]: I1006 21:42:18.965468 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" event={"ID":"c044ee58-b4d9-4f49-b1c6-f9efab2b0383","Type":"ContainerStarted","Data":"38dba34a0a001cda8115c50b6c6cf7e37d41b36cee864d5010b73515736f5a1c"} Oct 06 21:42:18 crc kubenswrapper[5014]: I1006 21:42:18.965484 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" event={"ID":"c044ee58-b4d9-4f49-b1c6-f9efab2b0383","Type":"ContainerStarted","Data":"4b822f2505f1d83ec00192540116e3e988f075614ebf8c7fcaa498c2a9e68cef"} Oct 06 21:42:18 crc kubenswrapper[5014]: I1006 21:42:18.965502 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" event={"ID":"c044ee58-b4d9-4f49-b1c6-f9efab2b0383","Type":"ContainerStarted","Data":"32d3fce0862e973858609c196e5ece4c56957a1b89eae86e36edeff198c9aaf0"} Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.095952 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-tsprc"] Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.097686 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.101441 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.101987 5014 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-vrc8d" Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.102375 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.102867 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.229468 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/5447fba0-38f4-4e6e-b891-f44e97259e21-node-mnt\") pod \"crc-storage-crc-tsprc\" (UID: \"5447fba0-38f4-4e6e-b891-f44e97259e21\") " pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.229571 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpb8w\" (UniqueName: \"kubernetes.io/projected/5447fba0-38f4-4e6e-b891-f44e97259e21-kube-api-access-xpb8w\") pod \"crc-storage-crc-tsprc\" (UID: \"5447fba0-38f4-4e6e-b891-f44e97259e21\") " pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.229695 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/5447fba0-38f4-4e6e-b891-f44e97259e21-crc-storage\") pod \"crc-storage-crc-tsprc\" (UID: \"5447fba0-38f4-4e6e-b891-f44e97259e21\") " pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.330529 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpb8w\" (UniqueName: \"kubernetes.io/projected/5447fba0-38f4-4e6e-b891-f44e97259e21-kube-api-access-xpb8w\") pod \"crc-storage-crc-tsprc\" (UID: \"5447fba0-38f4-4e6e-b891-f44e97259e21\") " pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.330737 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/5447fba0-38f4-4e6e-b891-f44e97259e21-crc-storage\") pod \"crc-storage-crc-tsprc\" (UID: \"5447fba0-38f4-4e6e-b891-f44e97259e21\") " pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.330838 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/5447fba0-38f4-4e6e-b891-f44e97259e21-node-mnt\") pod \"crc-storage-crc-tsprc\" (UID: \"5447fba0-38f4-4e6e-b891-f44e97259e21\") " pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.331271 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/5447fba0-38f4-4e6e-b891-f44e97259e21-node-mnt\") pod \"crc-storage-crc-tsprc\" (UID: \"5447fba0-38f4-4e6e-b891-f44e97259e21\") " pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.331939 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"crc-storage\" (UniqueName: \"kubernetes.io/configmap/5447fba0-38f4-4e6e-b891-f44e97259e21-crc-storage\") pod \"crc-storage-crc-tsprc\" (UID: \"5447fba0-38f4-4e6e-b891-f44e97259e21\") " pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.364120 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpb8w\" (UniqueName: \"kubernetes.io/projected/5447fba0-38f4-4e6e-b891-f44e97259e21-kube-api-access-xpb8w\") pod \"crc-storage-crc-tsprc\" (UID: \"5447fba0-38f4-4e6e-b891-f44e97259e21\") " pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:20 crc kubenswrapper[5014]: I1006 21:42:20.428808 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:20 crc kubenswrapper[5014]: E1006 21:42:20.467808 5014 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-tsprc_crc-storage_5447fba0-38f4-4e6e-b891-f44e97259e21_0(a19c8474bdb3c696c013f08f361af1c90ac95709aaff4f7f508ab8461ab05ec0): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Oct 06 21:42:20 crc kubenswrapper[5014]: E1006 21:42:20.467916 5014 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-tsprc_crc-storage_5447fba0-38f4-4e6e-b891-f44e97259e21_0(a19c8474bdb3c696c013f08f361af1c90ac95709aaff4f7f508ab8461ab05ec0): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:20 crc kubenswrapper[5014]: E1006 21:42:20.467952 5014 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-tsprc_crc-storage_5447fba0-38f4-4e6e-b891-f44e97259e21_0(a19c8474bdb3c696c013f08f361af1c90ac95709aaff4f7f508ab8461ab05ec0): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:20 crc kubenswrapper[5014]: E1006 21:42:20.468030 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-tsprc_crc-storage(5447fba0-38f4-4e6e-b891-f44e97259e21)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-tsprc_crc-storage(5447fba0-38f4-4e6e-b891-f44e97259e21)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-tsprc_crc-storage_5447fba0-38f4-4e6e-b891-f44e97259e21_0(a19c8474bdb3c696c013f08f361af1c90ac95709aaff4f7f508ab8461ab05ec0): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="crc-storage/crc-storage-crc-tsprc" podUID="5447fba0-38f4-4e6e-b891-f44e97259e21" Oct 06 21:42:21 crc kubenswrapper[5014]: I1006 21:42:21.993840 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" event={"ID":"c044ee58-b4d9-4f49-b1c6-f9efab2b0383","Type":"ContainerStarted","Data":"790b18d3235e01aaf1ca337eb57f3a1f793a890c9da2e397fb0686f82daaeaf9"} Oct 06 21:42:24 crc kubenswrapper[5014]: I1006 21:42:24.014653 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" event={"ID":"c044ee58-b4d9-4f49-b1c6-f9efab2b0383","Type":"ContainerStarted","Data":"1d9372c915aaf20e8cdcb517c337747846a3e6c1749de02eda07392b8f4b9103"} Oct 06 21:42:24 crc kubenswrapper[5014]: I1006 21:42:24.015141 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:24 crc kubenswrapper[5014]: I1006 21:42:24.015165 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:24 crc kubenswrapper[5014]: I1006 21:42:24.015184 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:24 crc kubenswrapper[5014]: I1006 21:42:24.061517 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" podStartSLOduration=8.061499909 podStartE2EDuration="8.061499909s" podCreationTimestamp="2025-10-06 21:42:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:42:24.057454612 +0000 UTC m=+689.350491386" watchObservedRunningTime="2025-10-06 21:42:24.061499909 +0000 UTC m=+689.354536663" Oct 06 21:42:24 crc kubenswrapper[5014]: I1006 21:42:24.063849 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:24 crc kubenswrapper[5014]: I1006 21:42:24.070872 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:24 crc kubenswrapper[5014]: I1006 21:42:24.329551 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-tsprc"] Oct 06 21:42:24 crc kubenswrapper[5014]: I1006 21:42:24.329762 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:24 crc kubenswrapper[5014]: I1006 21:42:24.330360 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:24 crc kubenswrapper[5014]: E1006 21:42:24.364939 5014 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-tsprc_crc-storage_5447fba0-38f4-4e6e-b891-f44e97259e21_0(29981d90d626689e0f07e2da6ec2eb920168eaf2ccd3c88d21dd54db63183843): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
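[editor's note] The RunPodSandbox failure above (and the kuberuntime_sandbox/kuberuntime_manager/pod_workers echoes of it that follow) is the second of three failed sandbox-creation attempts for crc-storage-crc-tsprc, logged at 21:42:20, 21:42:24, and 21:42:35; the attempt at 21:42:49 succeeds once the kube-multus container, sitting in CrashLoopBackOff with a 20s back-off at 21:42:31, is restarted at 21:42:47 and presumably rewrites its CNI configuration. The error string appears to originate in CRI-O's CNI layer, which refuses to build a pod sandbox until at least one network configuration file exists in the directory it watches. The Go sketch below is illustrative only, not CRI-O's actual loader: it assumes the conventional .conf/.conflist/.json extensions, uses the directory path named in the log messages, and the hasCNIConfig helper is hypothetical.

// cnicheck.go - illustrative sketch of the directory scan behind the
// "no CNI configuration file in /etc/kubernetes/cni/net.d/" errors above.
// Not CRI-O's real implementation; it only shows the idea that sandbox
// creation is blocked until a CNI config file appears.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// hasCNIConfig (hypothetical helper) reports whether dir contains at least
// one CNI network configuration file, using the conventional extensions.
func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch strings.ToLower(filepath.Ext(e.Name())) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	dir := "/etc/kubernetes/cni/net.d" // directory named in the log entries
	ok, err := hasCNIConfig(dir)
	if err != nil {
		fmt.Fprintf(os.Stderr, "cannot read %s: %v\n", dir, err)
		os.Exit(1)
	}
	if !ok {
		// The situation captured above: multus had not yet written its
		// config, so every RunPodSandbox call failed until it recovered.
		fmt.Printf("no CNI configuration file in %s - sandbox creation will fail\n", dir)
		os.Exit(2)
	}
	fmt.Println("CNI configuration present; sandbox creation can proceed")
}

Under that reading the kubelet side needs no special handling: the pod worker records the CreatePodSandbox error and simply retries on a later sync, which is why the same message repeats verbatim with a fresh sandbox ID on each attempt (a19c8474..., 29981d90..., 026b603b...).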
Oct 06 21:42:24 crc kubenswrapper[5014]: E1006 21:42:24.365012 5014 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-tsprc_crc-storage_5447fba0-38f4-4e6e-b891-f44e97259e21_0(29981d90d626689e0f07e2da6ec2eb920168eaf2ccd3c88d21dd54db63183843): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:24 crc kubenswrapper[5014]: E1006 21:42:24.365037 5014 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-tsprc_crc-storage_5447fba0-38f4-4e6e-b891-f44e97259e21_0(29981d90d626689e0f07e2da6ec2eb920168eaf2ccd3c88d21dd54db63183843): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:24 crc kubenswrapper[5014]: E1006 21:42:24.365094 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-tsprc_crc-storage(5447fba0-38f4-4e6e-b891-f44e97259e21)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-tsprc_crc-storage(5447fba0-38f4-4e6e-b891-f44e97259e21)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-tsprc_crc-storage_5447fba0-38f4-4e6e-b891-f44e97259e21_0(29981d90d626689e0f07e2da6ec2eb920168eaf2ccd3c88d21dd54db63183843): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-tsprc" podUID="5447fba0-38f4-4e6e-b891-f44e97259e21" Oct 06 21:42:31 crc kubenswrapper[5014]: I1006 21:42:31.484869 5014 scope.go:117] "RemoveContainer" containerID="dbd4cf9bbd8472079722bf34bcdf563ce5e2cf12258cf52892b40da6cea24571" Oct 06 21:42:31 crc kubenswrapper[5014]: E1006 21:42:31.486030 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-8ddbf_openshift-multus(9f1464a5-d713-4f79-8248-33c69abcdac2)\"" pod="openshift-multus/multus-8ddbf" podUID="9f1464a5-d713-4f79-8248-33c69abcdac2" Oct 06 21:42:35 crc kubenswrapper[5014]: I1006 21:42:35.483921 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:35 crc kubenswrapper[5014]: I1006 21:42:35.489753 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:35 crc kubenswrapper[5014]: E1006 21:42:35.535353 5014 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-tsprc_crc-storage_5447fba0-38f4-4e6e-b891-f44e97259e21_0(026b603b366e0d50a6c99dfc9775d114b20b7b2b4310512bb05c47aded213411): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Oct 06 21:42:35 crc kubenswrapper[5014]: E1006 21:42:35.535472 5014 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-tsprc_crc-storage_5447fba0-38f4-4e6e-b891-f44e97259e21_0(026b603b366e0d50a6c99dfc9775d114b20b7b2b4310512bb05c47aded213411): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:35 crc kubenswrapper[5014]: E1006 21:42:35.535511 5014 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-tsprc_crc-storage_5447fba0-38f4-4e6e-b891-f44e97259e21_0(026b603b366e0d50a6c99dfc9775d114b20b7b2b4310512bb05c47aded213411): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:35 crc kubenswrapper[5014]: E1006 21:42:35.535576 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-tsprc_crc-storage(5447fba0-38f4-4e6e-b891-f44e97259e21)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-tsprc_crc-storage(5447fba0-38f4-4e6e-b891-f44e97259e21)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-tsprc_crc-storage_5447fba0-38f4-4e6e-b891-f44e97259e21_0(026b603b366e0d50a6c99dfc9775d114b20b7b2b4310512bb05c47aded213411): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-tsprc" podUID="5447fba0-38f4-4e6e-b891-f44e97259e21" Oct 06 21:42:46 crc kubenswrapper[5014]: I1006 21:42:46.484549 5014 scope.go:117] "RemoveContainer" containerID="dbd4cf9bbd8472079722bf34bcdf563ce5e2cf12258cf52892b40da6cea24571" Oct 06 21:42:47 crc kubenswrapper[5014]: I1006 21:42:47.004178 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tzlzg" Oct 06 21:42:47 crc kubenswrapper[5014]: I1006 21:42:47.170750 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8ddbf_9f1464a5-d713-4f79-8248-33c69abcdac2/kube-multus/2.log" Oct 06 21:42:47 crc kubenswrapper[5014]: I1006 21:42:47.170831 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8ddbf" event={"ID":"9f1464a5-d713-4f79-8248-33c69abcdac2","Type":"ContainerStarted","Data":"03e87e890a595fb0599c0b2ead6cf1fc2a0d5ab0a22244ada5b864fc8a0d99f9"} Oct 06 21:42:49 crc kubenswrapper[5014]: I1006 21:42:49.484227 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:49 crc kubenswrapper[5014]: I1006 21:42:49.484961 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:49 crc kubenswrapper[5014]: I1006 21:42:49.816845 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-tsprc"] Oct 06 21:42:49 crc kubenswrapper[5014]: W1006 21:42:49.830214 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5447fba0_38f4_4e6e_b891_f44e97259e21.slice/crio-ea22a9ca23ff614cefa72fea58fc12338a8317c4e99256df7824ed37358647fa WatchSource:0}: Error finding container ea22a9ca23ff614cefa72fea58fc12338a8317c4e99256df7824ed37358647fa: Status 404 returned error can't find the container with id ea22a9ca23ff614cefa72fea58fc12338a8317c4e99256df7824ed37358647fa Oct 06 21:42:49 crc kubenswrapper[5014]: I1006 21:42:49.834726 5014 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 06 21:42:50 crc kubenswrapper[5014]: I1006 21:42:50.191599 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-tsprc" event={"ID":"5447fba0-38f4-4e6e-b891-f44e97259e21","Type":"ContainerStarted","Data":"ea22a9ca23ff614cefa72fea58fc12338a8317c4e99256df7824ed37358647fa"} Oct 06 21:42:52 crc kubenswrapper[5014]: I1006 21:42:52.206301 5014 generic.go:334] "Generic (PLEG): container finished" podID="5447fba0-38f4-4e6e-b891-f44e97259e21" containerID="29ced8a93e901b500a5cc3c9a2015cf2a739b1a3ccd388d15c7a9a1f68a2b548" exitCode=0 Oct 06 21:42:52 crc kubenswrapper[5014]: I1006 21:42:52.206389 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-tsprc" event={"ID":"5447fba0-38f4-4e6e-b891-f44e97259e21","Type":"ContainerDied","Data":"29ced8a93e901b500a5cc3c9a2015cf2a739b1a3ccd388d15c7a9a1f68a2b548"} Oct 06 21:42:53 crc kubenswrapper[5014]: I1006 21:42:53.501925 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:42:53 crc kubenswrapper[5014]: I1006 21:42:53.534835 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/5447fba0-38f4-4e6e-b891-f44e97259e21-crc-storage\") pod \"5447fba0-38f4-4e6e-b891-f44e97259e21\" (UID: \"5447fba0-38f4-4e6e-b891-f44e97259e21\") " Oct 06 21:42:53 crc kubenswrapper[5014]: I1006 21:42:53.534945 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/5447fba0-38f4-4e6e-b891-f44e97259e21-node-mnt\") pod \"5447fba0-38f4-4e6e-b891-f44e97259e21\" (UID: \"5447fba0-38f4-4e6e-b891-f44e97259e21\") " Oct 06 21:42:53 crc kubenswrapper[5014]: I1006 21:42:53.534993 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xpb8w\" (UniqueName: \"kubernetes.io/projected/5447fba0-38f4-4e6e-b891-f44e97259e21-kube-api-access-xpb8w\") pod \"5447fba0-38f4-4e6e-b891-f44e97259e21\" (UID: \"5447fba0-38f4-4e6e-b891-f44e97259e21\") " Oct 06 21:42:53 crc kubenswrapper[5014]: I1006 21:42:53.535063 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5447fba0-38f4-4e6e-b891-f44e97259e21-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "5447fba0-38f4-4e6e-b891-f44e97259e21" (UID: "5447fba0-38f4-4e6e-b891-f44e97259e21"). InnerVolumeSpecName "node-mnt". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:42:53 crc kubenswrapper[5014]: I1006 21:42:53.535195 5014 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/5447fba0-38f4-4e6e-b891-f44e97259e21-node-mnt\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:53 crc kubenswrapper[5014]: I1006 21:42:53.540252 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5447fba0-38f4-4e6e-b891-f44e97259e21-kube-api-access-xpb8w" (OuterVolumeSpecName: "kube-api-access-xpb8w") pod "5447fba0-38f4-4e6e-b891-f44e97259e21" (UID: "5447fba0-38f4-4e6e-b891-f44e97259e21"). InnerVolumeSpecName "kube-api-access-xpb8w". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:42:53 crc kubenswrapper[5014]: I1006 21:42:53.552744 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5447fba0-38f4-4e6e-b891-f44e97259e21-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "5447fba0-38f4-4e6e-b891-f44e97259e21" (UID: "5447fba0-38f4-4e6e-b891-f44e97259e21"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:42:53 crc kubenswrapper[5014]: I1006 21:42:53.637082 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xpb8w\" (UniqueName: \"kubernetes.io/projected/5447fba0-38f4-4e6e-b891-f44e97259e21-kube-api-access-xpb8w\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:53 crc kubenswrapper[5014]: I1006 21:42:53.637456 5014 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/5447fba0-38f4-4e6e-b891-f44e97259e21-crc-storage\") on node \"crc\" DevicePath \"\"" Oct 06 21:42:54 crc kubenswrapper[5014]: I1006 21:42:54.224104 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-tsprc" event={"ID":"5447fba0-38f4-4e6e-b891-f44e97259e21","Type":"ContainerDied","Data":"ea22a9ca23ff614cefa72fea58fc12338a8317c4e99256df7824ed37358647fa"} Oct 06 21:42:54 crc kubenswrapper[5014]: I1006 21:42:54.224550 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ea22a9ca23ff614cefa72fea58fc12338a8317c4e99256df7824ed37358647fa" Oct 06 21:42:54 crc kubenswrapper[5014]: I1006 21:42:54.224200 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-tsprc" Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.417014 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf"] Oct 06 21:43:02 crc kubenswrapper[5014]: E1006 21:43:02.417878 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5447fba0-38f4-4e6e-b891-f44e97259e21" containerName="storage" Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.417893 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5447fba0-38f4-4e6e-b891-f44e97259e21" containerName="storage" Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.418002 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5447fba0-38f4-4e6e-b891-f44e97259e21" containerName="storage" Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.418755 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.420313 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.430021 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf"] Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.484690 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-bundle\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf\" (UID: \"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.484937 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz2xh\" (UniqueName: \"kubernetes.io/projected/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-kube-api-access-qz2xh\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf\" (UID: \"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.485263 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-util\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf\" (UID: \"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.587020 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-util\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf\" (UID: \"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.587320 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-bundle\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf\" (UID: \"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.587394 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qz2xh\" (UniqueName: \"kubernetes.io/projected/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-kube-api-access-qz2xh\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf\" (UID: \"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.587937 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-util\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf\" (UID: \"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.588475 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-bundle\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf\" (UID: \"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.613691 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qz2xh\" (UniqueName: \"kubernetes.io/projected/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-kube-api-access-qz2xh\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf\" (UID: \"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" Oct 06 21:43:02 crc kubenswrapper[5014]: I1006 21:43:02.740924 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" Oct 06 21:43:03 crc kubenswrapper[5014]: I1006 21:43:03.021605 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf"] Oct 06 21:43:03 crc kubenswrapper[5014]: I1006 21:43:03.287071 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" event={"ID":"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1","Type":"ContainerStarted","Data":"3984c0570b130e6d10b110f3860e33417c4efc1310ae9d327b4fa642638fd308"} Oct 06 21:43:03 crc kubenswrapper[5014]: I1006 21:43:03.287132 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" event={"ID":"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1","Type":"ContainerStarted","Data":"e0eb59566c31b27a3831d112a1fe99cf6e47fdf5f8cb94c18f9917455a861dcd"} Oct 06 21:43:04 crc kubenswrapper[5014]: I1006 21:43:04.300308 5014 generic.go:334] "Generic (PLEG): container finished" podID="f61ea3cc-1bff-48c1-bd62-79aaf9d617c1" containerID="3984c0570b130e6d10b110f3860e33417c4efc1310ae9d327b4fa642638fd308" exitCode=0 Oct 06 21:43:04 crc kubenswrapper[5014]: I1006 21:43:04.300422 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" event={"ID":"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1","Type":"ContainerDied","Data":"3984c0570b130e6d10b110f3860e33417c4efc1310ae9d327b4fa642638fd308"} Oct 06 21:43:06 crc kubenswrapper[5014]: I1006 21:43:06.313471 5014 generic.go:334] "Generic (PLEG): container finished" podID="f61ea3cc-1bff-48c1-bd62-79aaf9d617c1" containerID="32fe634c0f8adc9a902858b46236edf68d7121204367b373ad7ff2b48f9713cc" exitCode=0 Oct 06 21:43:06 crc kubenswrapper[5014]: I1006 21:43:06.313641 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" 
event={"ID":"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1","Type":"ContainerDied","Data":"32fe634c0f8adc9a902858b46236edf68d7121204367b373ad7ff2b48f9713cc"} Oct 06 21:43:07 crc kubenswrapper[5014]: I1006 21:43:07.325793 5014 generic.go:334] "Generic (PLEG): container finished" podID="f61ea3cc-1bff-48c1-bd62-79aaf9d617c1" containerID="d65c76240686d57e474cead63d9605b5c1c3db87ac4997c4136a0b08d732d79f" exitCode=0 Oct 06 21:43:07 crc kubenswrapper[5014]: I1006 21:43:07.326259 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" event={"ID":"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1","Type":"ContainerDied","Data":"d65c76240686d57e474cead63d9605b5c1c3db87ac4997c4136a0b08d732d79f"} Oct 06 21:43:08 crc kubenswrapper[5014]: I1006 21:43:08.644285 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" Oct 06 21:43:08 crc kubenswrapper[5014]: I1006 21:43:08.783041 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-bundle\") pod \"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1\" (UID: \"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1\") " Oct 06 21:43:08 crc kubenswrapper[5014]: I1006 21:43:08.783149 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-util\") pod \"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1\" (UID: \"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1\") " Oct 06 21:43:08 crc kubenswrapper[5014]: I1006 21:43:08.783262 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qz2xh\" (UniqueName: \"kubernetes.io/projected/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-kube-api-access-qz2xh\") pod \"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1\" (UID: \"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1\") " Oct 06 21:43:08 crc kubenswrapper[5014]: I1006 21:43:08.785008 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-bundle" (OuterVolumeSpecName: "bundle") pod "f61ea3cc-1bff-48c1-bd62-79aaf9d617c1" (UID: "f61ea3cc-1bff-48c1-bd62-79aaf9d617c1"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:43:08 crc kubenswrapper[5014]: I1006 21:43:08.790124 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-kube-api-access-qz2xh" (OuterVolumeSpecName: "kube-api-access-qz2xh") pod "f61ea3cc-1bff-48c1-bd62-79aaf9d617c1" (UID: "f61ea3cc-1bff-48c1-bd62-79aaf9d617c1"). InnerVolumeSpecName "kube-api-access-qz2xh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:43:08 crc kubenswrapper[5014]: I1006 21:43:08.885202 5014 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:08 crc kubenswrapper[5014]: I1006 21:43:08.885251 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qz2xh\" (UniqueName: \"kubernetes.io/projected/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-kube-api-access-qz2xh\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:08 crc kubenswrapper[5014]: I1006 21:43:08.961029 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-util" (OuterVolumeSpecName: "util") pod "f61ea3cc-1bff-48c1-bd62-79aaf9d617c1" (UID: "f61ea3cc-1bff-48c1-bd62-79aaf9d617c1"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:43:08 crc kubenswrapper[5014]: I1006 21:43:08.986230 5014 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f61ea3cc-1bff-48c1-bd62-79aaf9d617c1-util\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:09 crc kubenswrapper[5014]: I1006 21:43:09.347008 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" event={"ID":"f61ea3cc-1bff-48c1-bd62-79aaf9d617c1","Type":"ContainerDied","Data":"e0eb59566c31b27a3831d112a1fe99cf6e47fdf5f8cb94c18f9917455a861dcd"} Oct 06 21:43:09 crc kubenswrapper[5014]: I1006 21:43:09.347080 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e0eb59566c31b27a3831d112a1fe99cf6e47fdf5f8cb94c18f9917455a861dcd" Oct 06 21:43:09 crc kubenswrapper[5014]: I1006 21:43:09.347170 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf" Oct 06 21:43:11 crc kubenswrapper[5014]: I1006 21:43:11.179981 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-858ddd8f98-fzvp8"] Oct 06 21:43:11 crc kubenswrapper[5014]: E1006 21:43:11.180665 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f61ea3cc-1bff-48c1-bd62-79aaf9d617c1" containerName="util" Oct 06 21:43:11 crc kubenswrapper[5014]: I1006 21:43:11.180683 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="f61ea3cc-1bff-48c1-bd62-79aaf9d617c1" containerName="util" Oct 06 21:43:11 crc kubenswrapper[5014]: E1006 21:43:11.180703 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f61ea3cc-1bff-48c1-bd62-79aaf9d617c1" containerName="extract" Oct 06 21:43:11 crc kubenswrapper[5014]: I1006 21:43:11.180711 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="f61ea3cc-1bff-48c1-bd62-79aaf9d617c1" containerName="extract" Oct 06 21:43:11 crc kubenswrapper[5014]: E1006 21:43:11.180720 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f61ea3cc-1bff-48c1-bd62-79aaf9d617c1" containerName="pull" Oct 06 21:43:11 crc kubenswrapper[5014]: I1006 21:43:11.180729 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="f61ea3cc-1bff-48c1-bd62-79aaf9d617c1" containerName="pull" Oct 06 21:43:11 crc kubenswrapper[5014]: I1006 21:43:11.180836 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="f61ea3cc-1bff-48c1-bd62-79aaf9d617c1" containerName="extract" Oct 06 21:43:11 crc kubenswrapper[5014]: I1006 21:43:11.181218 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-858ddd8f98-fzvp8" Oct 06 21:43:11 crc kubenswrapper[5014]: I1006 21:43:11.187886 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Oct 06 21:43:11 crc kubenswrapper[5014]: I1006 21:43:11.190648 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Oct 06 21:43:11 crc kubenswrapper[5014]: I1006 21:43:11.192166 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-sxz7x" Oct 06 21:43:11 crc kubenswrapper[5014]: I1006 21:43:11.194057 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-858ddd8f98-fzvp8"] Oct 06 21:43:11 crc kubenswrapper[5014]: I1006 21:43:11.315695 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qltqc\" (UniqueName: \"kubernetes.io/projected/e4d6ee06-3721-49c5-ab97-06291d64eb68-kube-api-access-qltqc\") pod \"nmstate-operator-858ddd8f98-fzvp8\" (UID: \"e4d6ee06-3721-49c5-ab97-06291d64eb68\") " pod="openshift-nmstate/nmstate-operator-858ddd8f98-fzvp8" Oct 06 21:43:11 crc kubenswrapper[5014]: I1006 21:43:11.416998 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qltqc\" (UniqueName: \"kubernetes.io/projected/e4d6ee06-3721-49c5-ab97-06291d64eb68-kube-api-access-qltqc\") pod \"nmstate-operator-858ddd8f98-fzvp8\" (UID: \"e4d6ee06-3721-49c5-ab97-06291d64eb68\") " pod="openshift-nmstate/nmstate-operator-858ddd8f98-fzvp8" Oct 06 21:43:11 crc kubenswrapper[5014]: I1006 21:43:11.454770 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qltqc\" 
(UniqueName: \"kubernetes.io/projected/e4d6ee06-3721-49c5-ab97-06291d64eb68-kube-api-access-qltqc\") pod \"nmstate-operator-858ddd8f98-fzvp8\" (UID: \"e4d6ee06-3721-49c5-ab97-06291d64eb68\") " pod="openshift-nmstate/nmstate-operator-858ddd8f98-fzvp8" Oct 06 21:43:11 crc kubenswrapper[5014]: I1006 21:43:11.501410 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-858ddd8f98-fzvp8" Oct 06 21:43:11 crc kubenswrapper[5014]: I1006 21:43:11.686246 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-858ddd8f98-fzvp8"] Oct 06 21:43:12 crc kubenswrapper[5014]: I1006 21:43:12.365075 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-858ddd8f98-fzvp8" event={"ID":"e4d6ee06-3721-49c5-ab97-06291d64eb68","Type":"ContainerStarted","Data":"445706e10296258299cc9f817e45c0661d7eafc98ec1a0a516e9fccd2a6ab005"} Oct 06 21:43:14 crc kubenswrapper[5014]: I1006 21:43:14.387763 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-858ddd8f98-fzvp8" event={"ID":"e4d6ee06-3721-49c5-ab97-06291d64eb68","Type":"ContainerStarted","Data":"b3251117922bf02b5a5fd9c0fb87d4b6e608a4918c10720852b3d16a40ed377a"} Oct 06 21:43:14 crc kubenswrapper[5014]: I1006 21:43:14.410474 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-858ddd8f98-fzvp8" podStartSLOduration=1.486912887 podStartE2EDuration="3.410452124s" podCreationTimestamp="2025-10-06 21:43:11 +0000 UTC" firstStartedPulling="2025-10-06 21:43:11.695763331 +0000 UTC m=+736.988800065" lastFinishedPulling="2025-10-06 21:43:13.619302528 +0000 UTC m=+738.912339302" observedRunningTime="2025-10-06 21:43:14.406485621 +0000 UTC m=+739.699522385" watchObservedRunningTime="2025-10-06 21:43:14.410452124 +0000 UTC m=+739.703488868" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.492252 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-fdff9cb8d-w5jdd"] Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.493685 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-w5jdd" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.496292 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-w9dkw" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.501199 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf"] Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.502045 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.504656 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.508754 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-fdff9cb8d-w5jdd"] Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.552368 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf"] Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.559976 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-fzdcf"] Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.560646 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.576456 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5mxj\" (UniqueName: \"kubernetes.io/projected/6c36947b-29b2-4c5e-888f-05d99e9a7ffd-kube-api-access-m5mxj\") pod \"nmstate-metrics-fdff9cb8d-w5jdd\" (UID: \"6c36947b-29b2-4c5e-888f-05d99e9a7ffd\") " pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-w5jdd" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.576549 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/e36e22ae-e89e-4303-a79c-7d590084348e-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-shqhf\" (UID: \"e36e22ae-e89e-4303-a79c-7d590084348e\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.576660 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xm6p\" (UniqueName: \"kubernetes.io/projected/e36e22ae-e89e-4303-a79c-7d590084348e-kube-api-access-6xm6p\") pod \"nmstate-webhook-6cdbc54649-shqhf\" (UID: \"e36e22ae-e89e-4303-a79c-7d590084348e\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.677532 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/886eb5f7-fd1f-4153-b290-a36f31fe58b8-ovs-socket\") pod \"nmstate-handler-fzdcf\" (UID: \"886eb5f7-fd1f-4153-b290-a36f31fe58b8\") " pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.677576 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/e36e22ae-e89e-4303-a79c-7d590084348e-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-shqhf\" (UID: \"e36e22ae-e89e-4303-a79c-7d590084348e\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.677613 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/886eb5f7-fd1f-4153-b290-a36f31fe58b8-nmstate-lock\") pod \"nmstate-handler-fzdcf\" (UID: \"886eb5f7-fd1f-4153-b290-a36f31fe58b8\") " pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.677650 5014 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-6xm6p\" (UniqueName: \"kubernetes.io/projected/e36e22ae-e89e-4303-a79c-7d590084348e-kube-api-access-6xm6p\") pod \"nmstate-webhook-6cdbc54649-shqhf\" (UID: \"e36e22ae-e89e-4303-a79c-7d590084348e\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.677682 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/886eb5f7-fd1f-4153-b290-a36f31fe58b8-dbus-socket\") pod \"nmstate-handler-fzdcf\" (UID: \"886eb5f7-fd1f-4153-b290-a36f31fe58b8\") " pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.677702 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5mxj\" (UniqueName: \"kubernetes.io/projected/6c36947b-29b2-4c5e-888f-05d99e9a7ffd-kube-api-access-m5mxj\") pod \"nmstate-metrics-fdff9cb8d-w5jdd\" (UID: \"6c36947b-29b2-4c5e-888f-05d99e9a7ffd\") " pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-w5jdd" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.677735 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-278lb\" (UniqueName: \"kubernetes.io/projected/886eb5f7-fd1f-4153-b290-a36f31fe58b8-kube-api-access-278lb\") pod \"nmstate-handler-fzdcf\" (UID: \"886eb5f7-fd1f-4153-b290-a36f31fe58b8\") " pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:15 crc kubenswrapper[5014]: E1006 21:43:15.677860 5014 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Oct 06 21:43:15 crc kubenswrapper[5014]: E1006 21:43:15.677903 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e36e22ae-e89e-4303-a79c-7d590084348e-tls-key-pair podName:e36e22ae-e89e-4303-a79c-7d590084348e nodeName:}" failed. No retries permitted until 2025-10-06 21:43:16.17788602 +0000 UTC m=+741.470922754 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/e36e22ae-e89e-4303-a79c-7d590084348e-tls-key-pair") pod "nmstate-webhook-6cdbc54649-shqhf" (UID: "e36e22ae-e89e-4303-a79c-7d590084348e") : secret "openshift-nmstate-webhook" not found Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.700638 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5mxj\" (UniqueName: \"kubernetes.io/projected/6c36947b-29b2-4c5e-888f-05d99e9a7ffd-kube-api-access-m5mxj\") pod \"nmstate-metrics-fdff9cb8d-w5jdd\" (UID: \"6c36947b-29b2-4c5e-888f-05d99e9a7ffd\") " pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-w5jdd" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.715819 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xm6p\" (UniqueName: \"kubernetes.io/projected/e36e22ae-e89e-4303-a79c-7d590084348e-kube-api-access-6xm6p\") pod \"nmstate-webhook-6cdbc54649-shqhf\" (UID: \"e36e22ae-e89e-4303-a79c-7d590084348e\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.724249 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4"] Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.724860 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.726842 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.726880 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-vmlmb" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.735189 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.736510 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4"] Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.779217 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/886eb5f7-fd1f-4153-b290-a36f31fe58b8-dbus-socket\") pod \"nmstate-handler-fzdcf\" (UID: \"886eb5f7-fd1f-4153-b290-a36f31fe58b8\") " pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.779281 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-278lb\" (UniqueName: \"kubernetes.io/projected/886eb5f7-fd1f-4153-b290-a36f31fe58b8-kube-api-access-278lb\") pod \"nmstate-handler-fzdcf\" (UID: \"886eb5f7-fd1f-4153-b290-a36f31fe58b8\") " pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.779311 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/886eb5f7-fd1f-4153-b290-a36f31fe58b8-ovs-socket\") pod \"nmstate-handler-fzdcf\" (UID: \"886eb5f7-fd1f-4153-b290-a36f31fe58b8\") " pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.779352 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/886eb5f7-fd1f-4153-b290-a36f31fe58b8-nmstate-lock\") pod \"nmstate-handler-fzdcf\" (UID: \"886eb5f7-fd1f-4153-b290-a36f31fe58b8\") " pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.779418 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/886eb5f7-fd1f-4153-b290-a36f31fe58b8-nmstate-lock\") pod \"nmstate-handler-fzdcf\" (UID: \"886eb5f7-fd1f-4153-b290-a36f31fe58b8\") " pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.779462 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/886eb5f7-fd1f-4153-b290-a36f31fe58b8-ovs-socket\") pod \"nmstate-handler-fzdcf\" (UID: \"886eb5f7-fd1f-4153-b290-a36f31fe58b8\") " pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.779793 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/886eb5f7-fd1f-4153-b290-a36f31fe58b8-dbus-socket\") pod \"nmstate-handler-fzdcf\" (UID: \"886eb5f7-fd1f-4153-b290-a36f31fe58b8\") " pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.798732 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-278lb\" (UniqueName: \"kubernetes.io/projected/886eb5f7-fd1f-4153-b290-a36f31fe58b8-kube-api-access-278lb\") pod \"nmstate-handler-fzdcf\" (UID: \"886eb5f7-fd1f-4153-b290-a36f31fe58b8\") " pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.814319 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-w5jdd" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.873508 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.880075 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbcgz\" (UniqueName: \"kubernetes.io/projected/054f0bcd-6bff-4bda-87f2-5863aa08d4f9-kube-api-access-kbcgz\") pod \"nmstate-console-plugin-6b874cbd85-8b8c4\" (UID: \"054f0bcd-6bff-4bda-87f2-5863aa08d4f9\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.880151 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/054f0bcd-6bff-4bda-87f2-5863aa08d4f9-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-8b8c4\" (UID: \"054f0bcd-6bff-4bda-87f2-5863aa08d4f9\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.880190 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/054f0bcd-6bff-4bda-87f2-5863aa08d4f9-nginx-conf\") pod \"nmstate-console-plugin-6b874cbd85-8b8c4\" (UID: \"054f0bcd-6bff-4bda-87f2-5863aa08d4f9\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4" Oct 06 21:43:15 crc kubenswrapper[5014]: W1006 21:43:15.912783 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod886eb5f7_fd1f_4153_b290_a36f31fe58b8.slice/crio-255e2bc3947bbeabb7aa4c007a300cb7d86ed743337f0243497f377efb0b5727 WatchSource:0}: Error finding container 255e2bc3947bbeabb7aa4c007a300cb7d86ed743337f0243497f377efb0b5727: Status 404 returned error can't find the container with id 255e2bc3947bbeabb7aa4c007a300cb7d86ed743337f0243497f377efb0b5727 Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.924465 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-68bf86dbc-n2n9n"] Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.925250 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.927148 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-68bf86dbc-n2n9n"] Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.983024 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f5614533-f5f7-475f-a337-b5c2644f43cf-service-ca\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.983065 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbcgz\" (UniqueName: \"kubernetes.io/projected/054f0bcd-6bff-4bda-87f2-5863aa08d4f9-kube-api-access-kbcgz\") pod \"nmstate-console-plugin-6b874cbd85-8b8c4\" (UID: \"054f0bcd-6bff-4bda-87f2-5863aa08d4f9\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.983084 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f5614533-f5f7-475f-a337-b5c2644f43cf-oauth-serving-cert\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.983103 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f5614533-f5f7-475f-a337-b5c2644f43cf-console-oauth-config\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.983124 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f5614533-f5f7-475f-a337-b5c2644f43cf-trusted-ca-bundle\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.983145 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f5614533-f5f7-475f-a337-b5c2644f43cf-console-config\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.983162 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2r4h4\" (UniqueName: \"kubernetes.io/projected/f5614533-f5f7-475f-a337-b5c2644f43cf-kube-api-access-2r4h4\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.983539 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/054f0bcd-6bff-4bda-87f2-5863aa08d4f9-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-8b8c4\" (UID: \"054f0bcd-6bff-4bda-87f2-5863aa08d4f9\") " 
pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.983565 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/054f0bcd-6bff-4bda-87f2-5863aa08d4f9-nginx-conf\") pod \"nmstate-console-plugin-6b874cbd85-8b8c4\" (UID: \"054f0bcd-6bff-4bda-87f2-5863aa08d4f9\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.983640 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f5614533-f5f7-475f-a337-b5c2644f43cf-console-serving-cert\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.984549 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/054f0bcd-6bff-4bda-87f2-5863aa08d4f9-nginx-conf\") pod \"nmstate-console-plugin-6b874cbd85-8b8c4\" (UID: \"054f0bcd-6bff-4bda-87f2-5863aa08d4f9\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4" Oct 06 21:43:15 crc kubenswrapper[5014]: I1006 21:43:15.989347 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/054f0bcd-6bff-4bda-87f2-5863aa08d4f9-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-8b8c4\" (UID: \"054f0bcd-6bff-4bda-87f2-5863aa08d4f9\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.001738 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbcgz\" (UniqueName: \"kubernetes.io/projected/054f0bcd-6bff-4bda-87f2-5863aa08d4f9-kube-api-access-kbcgz\") pod \"nmstate-console-plugin-6b874cbd85-8b8c4\" (UID: \"054f0bcd-6bff-4bda-87f2-5863aa08d4f9\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.049039 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.084469 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f5614533-f5f7-475f-a337-b5c2644f43cf-console-serving-cert\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.084786 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f5614533-f5f7-475f-a337-b5c2644f43cf-service-ca\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.084805 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f5614533-f5f7-475f-a337-b5c2644f43cf-oauth-serving-cert\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.084848 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f5614533-f5f7-475f-a337-b5c2644f43cf-console-oauth-config\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.084868 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f5614533-f5f7-475f-a337-b5c2644f43cf-trusted-ca-bundle\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.084888 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f5614533-f5f7-475f-a337-b5c2644f43cf-console-config\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.084928 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2r4h4\" (UniqueName: \"kubernetes.io/projected/f5614533-f5f7-475f-a337-b5c2644f43cf-kube-api-access-2r4h4\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.085805 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-fdff9cb8d-w5jdd"] Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.086861 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f5614533-f5f7-475f-a337-b5c2644f43cf-service-ca\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.087240 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/f5614533-f5f7-475f-a337-b5c2644f43cf-console-config\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.087423 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f5614533-f5f7-475f-a337-b5c2644f43cf-oauth-serving-cert\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.087744 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f5614533-f5f7-475f-a337-b5c2644f43cf-trusted-ca-bundle\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.090765 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f5614533-f5f7-475f-a337-b5c2644f43cf-console-serving-cert\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.091154 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f5614533-f5f7-475f-a337-b5c2644f43cf-console-oauth-config\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:16 crc kubenswrapper[5014]: W1006 21:43:16.103335 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6c36947b_29b2_4c5e_888f_05d99e9a7ffd.slice/crio-276a12f8778f7d22036fa972924cd14c655f3d3ea510dad1252d6613da98815b WatchSource:0}: Error finding container 276a12f8778f7d22036fa972924cd14c655f3d3ea510dad1252d6613da98815b: Status 404 returned error can't find the container with id 276a12f8778f7d22036fa972924cd14c655f3d3ea510dad1252d6613da98815b Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.104368 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2r4h4\" (UniqueName: \"kubernetes.io/projected/f5614533-f5f7-475f-a337-b5c2644f43cf-kube-api-access-2r4h4\") pod \"console-68bf86dbc-n2n9n\" (UID: \"f5614533-f5f7-475f-a337-b5c2644f43cf\") " pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.186598 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/e36e22ae-e89e-4303-a79c-7d590084348e-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-shqhf\" (UID: \"e36e22ae-e89e-4303-a79c-7d590084348e\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.190508 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/e36e22ae-e89e-4303-a79c-7d590084348e-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-shqhf\" (UID: \"e36e22ae-e89e-4303-a79c-7d590084348e\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.212847 5014 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4"] Oct 06 21:43:16 crc kubenswrapper[5014]: W1006 21:43:16.216999 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod054f0bcd_6bff_4bda_87f2_5863aa08d4f9.slice/crio-c1000531ef7f4004eab638e247592bbce791feaa0dc415fac784b13b4d3d8889 WatchSource:0}: Error finding container c1000531ef7f4004eab638e247592bbce791feaa0dc415fac784b13b4d3d8889: Status 404 returned error can't find the container with id c1000531ef7f4004eab638e247592bbce791feaa0dc415fac784b13b4d3d8889 Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.279062 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.422943 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf" Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.427235 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-w5jdd" event={"ID":"6c36947b-29b2-4c5e-888f-05d99e9a7ffd","Type":"ContainerStarted","Data":"276a12f8778f7d22036fa972924cd14c655f3d3ea510dad1252d6613da98815b"} Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.429036 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-fzdcf" event={"ID":"886eb5f7-fd1f-4153-b290-a36f31fe58b8","Type":"ContainerStarted","Data":"255e2bc3947bbeabb7aa4c007a300cb7d86ed743337f0243497f377efb0b5727"} Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.430519 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4" event={"ID":"054f0bcd-6bff-4bda-87f2-5863aa08d4f9","Type":"ContainerStarted","Data":"c1000531ef7f4004eab638e247592bbce791feaa0dc415fac784b13b4d3d8889"} Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.525763 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-68bf86dbc-n2n9n"] Oct 06 21:43:16 crc kubenswrapper[5014]: W1006 21:43:16.539760 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5614533_f5f7_475f_a337_b5c2644f43cf.slice/crio-e091a0bceb92b79c56ff582cd02359e51fdd26ec112aee69488c6b718f9eda24 WatchSource:0}: Error finding container e091a0bceb92b79c56ff582cd02359e51fdd26ec112aee69488c6b718f9eda24: Status 404 returned error can't find the container with id e091a0bceb92b79c56ff582cd02359e51fdd26ec112aee69488c6b718f9eda24 Oct 06 21:43:16 crc kubenswrapper[5014]: I1006 21:43:16.625938 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf"] Oct 06 21:43:16 crc kubenswrapper[5014]: W1006 21:43:16.644021 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode36e22ae_e89e_4303_a79c_7d590084348e.slice/crio-b9cfa28baa824cb5a700e9426979c839002a2a5fb31843e562322d12bf9a7763 WatchSource:0}: Error finding container b9cfa28baa824cb5a700e9426979c839002a2a5fb31843e562322d12bf9a7763: Status 404 returned error can't find the container with id b9cfa28baa824cb5a700e9426979c839002a2a5fb31843e562322d12bf9a7763 Oct 06 21:43:17 crc kubenswrapper[5014]: I1006 21:43:17.438482 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf" event={"ID":"e36e22ae-e89e-4303-a79c-7d590084348e","Type":"ContainerStarted","Data":"b9cfa28baa824cb5a700e9426979c839002a2a5fb31843e562322d12bf9a7763"} Oct 06 21:43:17 crc kubenswrapper[5014]: I1006 21:43:17.440482 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-68bf86dbc-n2n9n" event={"ID":"f5614533-f5f7-475f-a337-b5c2644f43cf","Type":"ContainerStarted","Data":"0ba71337ac3345a4f645f57a616f14c18149751deb74a2978a4267806460e8be"} Oct 06 21:43:17 crc kubenswrapper[5014]: I1006 21:43:17.440575 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-68bf86dbc-n2n9n" event={"ID":"f5614533-f5f7-475f-a337-b5c2644f43cf","Type":"ContainerStarted","Data":"e091a0bceb92b79c56ff582cd02359e51fdd26ec112aee69488c6b718f9eda24"} Oct 06 21:43:19 crc kubenswrapper[5014]: I1006 21:43:19.461087 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4" event={"ID":"054f0bcd-6bff-4bda-87f2-5863aa08d4f9","Type":"ContainerStarted","Data":"8faa70ede9da2965d2855069af2fc5afc21f55dc769c96546ae9b2d38b343374"} Oct 06 21:43:19 crc kubenswrapper[5014]: I1006 21:43:19.463974 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-w5jdd" event={"ID":"6c36947b-29b2-4c5e-888f-05d99e9a7ffd","Type":"ContainerStarted","Data":"896b1c0b5cf36f38ad7982323f46f484a18700c18f7d0d5beadc6b0518305b78"} Oct 06 21:43:19 crc kubenswrapper[5014]: I1006 21:43:19.467181 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf" event={"ID":"e36e22ae-e89e-4303-a79c-7d590084348e","Type":"ContainerStarted","Data":"4160d6bb3ac3e93d43f637901a16992287cdb1bb8b4bfd4f07d0e154602ffdbc"} Oct 06 21:43:19 crc kubenswrapper[5014]: I1006 21:43:19.467638 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf" Oct 06 21:43:19 crc kubenswrapper[5014]: I1006 21:43:19.469198 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-fzdcf" event={"ID":"886eb5f7-fd1f-4153-b290-a36f31fe58b8","Type":"ContainerStarted","Data":"e4a1556d0211f1c9f57a28ab6c1a4cd36c82904aab8d52017a28337b06980ddd"} Oct 06 21:43:19 crc kubenswrapper[5014]: I1006 21:43:19.469352 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:19 crc kubenswrapper[5014]: I1006 21:43:19.482030 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-68bf86dbc-n2n9n" podStartSLOduration=4.482014143 podStartE2EDuration="4.482014143s" podCreationTimestamp="2025-10-06 21:43:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:43:17.465732422 +0000 UTC m=+742.758769176" watchObservedRunningTime="2025-10-06 21:43:19.482014143 +0000 UTC m=+744.775050877" Oct 06 21:43:19 crc kubenswrapper[5014]: I1006 21:43:19.483066 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-8b8c4" podStartSLOduration=1.872069555 podStartE2EDuration="4.483059975s" podCreationTimestamp="2025-10-06 21:43:15 +0000 UTC" firstStartedPulling="2025-10-06 21:43:16.218874174 +0000 UTC m=+741.511910908" lastFinishedPulling="2025-10-06 21:43:18.829864574 +0000 UTC 
m=+744.122901328" observedRunningTime="2025-10-06 21:43:19.478595417 +0000 UTC m=+744.771632191" watchObservedRunningTime="2025-10-06 21:43:19.483059975 +0000 UTC m=+744.776096709" Oct 06 21:43:19 crc kubenswrapper[5014]: I1006 21:43:19.513139 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-fzdcf" podStartSLOduration=1.611983515 podStartE2EDuration="4.513120607s" podCreationTimestamp="2025-10-06 21:43:15 +0000 UTC" firstStartedPulling="2025-10-06 21:43:15.916418061 +0000 UTC m=+741.209454795" lastFinishedPulling="2025-10-06 21:43:18.817555143 +0000 UTC m=+744.110591887" observedRunningTime="2025-10-06 21:43:19.508355639 +0000 UTC m=+744.801392373" watchObservedRunningTime="2025-10-06 21:43:19.513120607 +0000 UTC m=+744.806157331" Oct 06 21:43:19 crc kubenswrapper[5014]: I1006 21:43:19.530697 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf" podStartSLOduration=2.3551072250000002 podStartE2EDuration="4.529860286s" podCreationTimestamp="2025-10-06 21:43:15 +0000 UTC" firstStartedPulling="2025-10-06 21:43:16.646904048 +0000 UTC m=+741.939940812" lastFinishedPulling="2025-10-06 21:43:18.821657139 +0000 UTC m=+744.114693873" observedRunningTime="2025-10-06 21:43:19.522005703 +0000 UTC m=+744.815042437" watchObservedRunningTime="2025-10-06 21:43:19.529860286 +0000 UTC m=+744.822897030" Oct 06 21:43:21 crc kubenswrapper[5014]: I1006 21:43:21.529466 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-w5jdd" event={"ID":"6c36947b-29b2-4c5e-888f-05d99e9a7ffd","Type":"ContainerStarted","Data":"71530b9b0eeb790b7e0ed4158941f454b307431d3a42cee3924d64af9ed0a7e5"} Oct 06 21:43:21 crc kubenswrapper[5014]: I1006 21:43:21.555431 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-w5jdd" podStartSLOduration=1.420188623 podStartE2EDuration="6.555407314s" podCreationTimestamp="2025-10-06 21:43:15 +0000 UTC" firstStartedPulling="2025-10-06 21:43:16.110187326 +0000 UTC m=+741.403224060" lastFinishedPulling="2025-10-06 21:43:21.245406017 +0000 UTC m=+746.538442751" observedRunningTime="2025-10-06 21:43:21.548858951 +0000 UTC m=+746.841895725" watchObservedRunningTime="2025-10-06 21:43:21.555407314 +0000 UTC m=+746.848444088" Oct 06 21:43:21 crc kubenswrapper[5014]: I1006 21:43:21.735021 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 21:43:21 crc kubenswrapper[5014]: I1006 21:43:21.735075 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.477094 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pc5xx"] Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.478106 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx" 
podUID="48bcee5f-3c11-4784-aa10-c5673058c7b1" containerName="controller-manager" containerID="cri-o://8833357ffa2c7fb32669aac8e4012e6f38da5819aadb86f5199d6e219bd5d726" gracePeriod=30 Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.576229 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng"] Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.576532 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" podUID="b9aabe83-7840-47cc-b3d5-72b068737094" containerName="route-controller-manager" containerID="cri-o://b6a6bf77573e8fb450b57b5302fb6348e423ca3170de1ac758713bce2e4db8af" gracePeriod=30 Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.875433 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx" Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.939342 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.939685 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-client-ca\") pod \"48bcee5f-3c11-4784-aa10-c5673058c7b1\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.939737 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48bcee5f-3c11-4784-aa10-c5673058c7b1-serving-cert\") pod \"48bcee5f-3c11-4784-aa10-c5673058c7b1\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.939801 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-proxy-ca-bundles\") pod \"48bcee5f-3c11-4784-aa10-c5673058c7b1\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.939851 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sz9zw\" (UniqueName: \"kubernetes.io/projected/48bcee5f-3c11-4784-aa10-c5673058c7b1-kube-api-access-sz9zw\") pod \"48bcee5f-3c11-4784-aa10-c5673058c7b1\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.940003 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-config\") pod \"48bcee5f-3c11-4784-aa10-c5673058c7b1\" (UID: \"48bcee5f-3c11-4784-aa10-c5673058c7b1\") " Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.940529 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-client-ca" (OuterVolumeSpecName: "client-ca") pod "48bcee5f-3c11-4784-aa10-c5673058c7b1" (UID: "48bcee5f-3c11-4784-aa10-c5673058c7b1"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.940544 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "48bcee5f-3c11-4784-aa10-c5673058c7b1" (UID: "48bcee5f-3c11-4784-aa10-c5673058c7b1"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.941228 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-config" (OuterVolumeSpecName: "config") pod "48bcee5f-3c11-4784-aa10-c5673058c7b1" (UID: "48bcee5f-3c11-4784-aa10-c5673058c7b1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.946974 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48bcee5f-3c11-4784-aa10-c5673058c7b1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "48bcee5f-3c11-4784-aa10-c5673058c7b1" (UID: "48bcee5f-3c11-4784-aa10-c5673058c7b1"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:43:24 crc kubenswrapper[5014]: I1006 21:43:24.947019 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48bcee5f-3c11-4784-aa10-c5673058c7b1-kube-api-access-sz9zw" (OuterVolumeSpecName: "kube-api-access-sz9zw") pod "48bcee5f-3c11-4784-aa10-c5673058c7b1" (UID: "48bcee5f-3c11-4784-aa10-c5673058c7b1"). InnerVolumeSpecName "kube-api-access-sz9zw". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.040987 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b9aabe83-7840-47cc-b3d5-72b068737094-client-ca\") pod \"b9aabe83-7840-47cc-b3d5-72b068737094\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.041029 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9aabe83-7840-47cc-b3d5-72b068737094-config\") pod \"b9aabe83-7840-47cc-b3d5-72b068737094\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.041047 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9aabe83-7840-47cc-b3d5-72b068737094-serving-cert\") pod \"b9aabe83-7840-47cc-b3d5-72b068737094\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.041134 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rsj5g\" (UniqueName: \"kubernetes.io/projected/b9aabe83-7840-47cc-b3d5-72b068737094-kube-api-access-rsj5g\") pod \"b9aabe83-7840-47cc-b3d5-72b068737094\" (UID: \"b9aabe83-7840-47cc-b3d5-72b068737094\") " Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.041322 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.041332 5014 
reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-client-ca\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.041341 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48bcee5f-3c11-4784-aa10-c5673058c7b1-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.041349 5014 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/48bcee5f-3c11-4784-aa10-c5673058c7b1-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.041360 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sz9zw\" (UniqueName: \"kubernetes.io/projected/48bcee5f-3c11-4784-aa10-c5673058c7b1-kube-api-access-sz9zw\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.041857 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9aabe83-7840-47cc-b3d5-72b068737094-config" (OuterVolumeSpecName: "config") pod "b9aabe83-7840-47cc-b3d5-72b068737094" (UID: "b9aabe83-7840-47cc-b3d5-72b068737094"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.042332 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9aabe83-7840-47cc-b3d5-72b068737094-client-ca" (OuterVolumeSpecName: "client-ca") pod "b9aabe83-7840-47cc-b3d5-72b068737094" (UID: "b9aabe83-7840-47cc-b3d5-72b068737094"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.044958 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9aabe83-7840-47cc-b3d5-72b068737094-kube-api-access-rsj5g" (OuterVolumeSpecName: "kube-api-access-rsj5g") pod "b9aabe83-7840-47cc-b3d5-72b068737094" (UID: "b9aabe83-7840-47cc-b3d5-72b068737094"). InnerVolumeSpecName "kube-api-access-rsj5g". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.045044 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9aabe83-7840-47cc-b3d5-72b068737094-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b9aabe83-7840-47cc-b3d5-72b068737094" (UID: "b9aabe83-7840-47cc-b3d5-72b068737094"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.142391 5014 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b9aabe83-7840-47cc-b3d5-72b068737094-client-ca\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.142421 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9aabe83-7840-47cc-b3d5-72b068737094-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.142433 5014 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9aabe83-7840-47cc-b3d5-72b068737094-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.142443 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rsj5g\" (UniqueName: \"kubernetes.io/projected/b9aabe83-7840-47cc-b3d5-72b068737094-kube-api-access-rsj5g\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.565267 5014 generic.go:334] "Generic (PLEG): container finished" podID="48bcee5f-3c11-4784-aa10-c5673058c7b1" containerID="8833357ffa2c7fb32669aac8e4012e6f38da5819aadb86f5199d6e219bd5d726" exitCode=0 Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.565330 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.565359 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx" event={"ID":"48bcee5f-3c11-4784-aa10-c5673058c7b1","Type":"ContainerDied","Data":"8833357ffa2c7fb32669aac8e4012e6f38da5819aadb86f5199d6e219bd5d726"} Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.565392 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-pc5xx" event={"ID":"48bcee5f-3c11-4784-aa10-c5673058c7b1","Type":"ContainerDied","Data":"2f0677a493954a44b9500b2756251874b0926c0f04bc551033d184536df17f54"} Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.565421 5014 scope.go:117] "RemoveContainer" containerID="8833357ffa2c7fb32669aac8e4012e6f38da5819aadb86f5199d6e219bd5d726" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.568686 5014 generic.go:334] "Generic (PLEG): container finished" podID="b9aabe83-7840-47cc-b3d5-72b068737094" containerID="b6a6bf77573e8fb450b57b5302fb6348e423ca3170de1ac758713bce2e4db8af" exitCode=0 Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.568757 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" event={"ID":"b9aabe83-7840-47cc-b3d5-72b068737094","Type":"ContainerDied","Data":"b6a6bf77573e8fb450b57b5302fb6348e423ca3170de1ac758713bce2e4db8af"} Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.568843 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" event={"ID":"b9aabe83-7840-47cc-b3d5-72b068737094","Type":"ContainerDied","Data":"a8b8cf872fee18dc7142474b9a0fb885937b93ab294b29854beaf1f4fa3ad81f"} Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.568768 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.586479 5014 scope.go:117] "RemoveContainer" containerID="8833357ffa2c7fb32669aac8e4012e6f38da5819aadb86f5199d6e219bd5d726" Oct 06 21:43:25 crc kubenswrapper[5014]: E1006 21:43:25.587034 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8833357ffa2c7fb32669aac8e4012e6f38da5819aadb86f5199d6e219bd5d726\": container with ID starting with 8833357ffa2c7fb32669aac8e4012e6f38da5819aadb86f5199d6e219bd5d726 not found: ID does not exist" containerID="8833357ffa2c7fb32669aac8e4012e6f38da5819aadb86f5199d6e219bd5d726" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.587120 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8833357ffa2c7fb32669aac8e4012e6f38da5819aadb86f5199d6e219bd5d726"} err="failed to get container status \"8833357ffa2c7fb32669aac8e4012e6f38da5819aadb86f5199d6e219bd5d726\": rpc error: code = NotFound desc = could not find container \"8833357ffa2c7fb32669aac8e4012e6f38da5819aadb86f5199d6e219bd5d726\": container with ID starting with 8833357ffa2c7fb32669aac8e4012e6f38da5819aadb86f5199d6e219bd5d726 not found: ID does not exist" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.587156 5014 scope.go:117] "RemoveContainer" containerID="b6a6bf77573e8fb450b57b5302fb6348e423ca3170de1ac758713bce2e4db8af" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.609853 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pc5xx"] Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.615464 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pc5xx"] Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.619135 5014 scope.go:117] "RemoveContainer" containerID="b6a6bf77573e8fb450b57b5302fb6348e423ca3170de1ac758713bce2e4db8af" Oct 06 21:43:25 crc kubenswrapper[5014]: E1006 21:43:25.619667 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6a6bf77573e8fb450b57b5302fb6348e423ca3170de1ac758713bce2e4db8af\": container with ID starting with b6a6bf77573e8fb450b57b5302fb6348e423ca3170de1ac758713bce2e4db8af not found: ID does not exist" containerID="b6a6bf77573e8fb450b57b5302fb6348e423ca3170de1ac758713bce2e4db8af" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.619728 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6a6bf77573e8fb450b57b5302fb6348e423ca3170de1ac758713bce2e4db8af"} err="failed to get container status \"b6a6bf77573e8fb450b57b5302fb6348e423ca3170de1ac758713bce2e4db8af\": rpc error: code = NotFound desc = could not find container \"b6a6bf77573e8fb450b57b5302fb6348e423ca3170de1ac758713bce2e4db8af\": container with ID starting with b6a6bf77573e8fb450b57b5302fb6348e423ca3170de1ac758713bce2e4db8af not found: ID does not exist" Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.620959 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng"] Oct 06 21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.625997 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qnjng"] Oct 06 
21:43:25 crc kubenswrapper[5014]: I1006 21:43:25.912400 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-fzdcf" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.280604 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.281042 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.287358 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.582265 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-68bf86dbc-n2n9n" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.649149 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-kb2p5"] Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.876212 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24"] Oct 06 21:43:26 crc kubenswrapper[5014]: E1006 21:43:26.877059 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48bcee5f-3c11-4784-aa10-c5673058c7b1" containerName="controller-manager" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.877085 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="48bcee5f-3c11-4784-aa10-c5673058c7b1" containerName="controller-manager" Oct 06 21:43:26 crc kubenswrapper[5014]: E1006 21:43:26.877110 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9aabe83-7840-47cc-b3d5-72b068737094" containerName="route-controller-manager" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.877120 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9aabe83-7840-47cc-b3d5-72b068737094" containerName="route-controller-manager" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.877248 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="48bcee5f-3c11-4784-aa10-c5673058c7b1" containerName="controller-manager" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.877276 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9aabe83-7840-47cc-b3d5-72b068737094" containerName="route-controller-manager" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.877866 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.883356 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-58984796d6-6dnzt"] Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.883885 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.884910 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.885358 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.887128 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.888519 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.893370 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.901299 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.901582 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.902654 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.909383 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.909798 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.911689 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.919762 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.920002 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24"] Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.922139 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.927846 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58984796d6-6dnzt"] Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.964014 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/5fe4552c-e5d9-4fa1-af6f-82a94af33ce4-client-ca\") pod \"controller-manager-58984796d6-6dnzt\" (UID: \"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4\") " pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.964101 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e7c54d2d-8d99-42eb-8311-a6b0d4c1995d-client-ca\") pod \"route-controller-manager-b55d98d7d-6vw24\" (UID: \"e7c54d2d-8d99-42eb-8311-a6b0d4c1995d\") " pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.964155 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7c54d2d-8d99-42eb-8311-a6b0d4c1995d-config\") pod \"route-controller-manager-b55d98d7d-6vw24\" (UID: \"e7c54d2d-8d99-42eb-8311-a6b0d4c1995d\") " pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.964200 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7c54d2d-8d99-42eb-8311-a6b0d4c1995d-serving-cert\") pod \"route-controller-manager-b55d98d7d-6vw24\" (UID: \"e7c54d2d-8d99-42eb-8311-a6b0d4c1995d\") " pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.964266 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9h879\" (UniqueName: \"kubernetes.io/projected/e7c54d2d-8d99-42eb-8311-a6b0d4c1995d-kube-api-access-9h879\") pod \"route-controller-manager-b55d98d7d-6vw24\" (UID: \"e7c54d2d-8d99-42eb-8311-a6b0d4c1995d\") " pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.964334 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fe4552c-e5d9-4fa1-af6f-82a94af33ce4-config\") pod \"controller-manager-58984796d6-6dnzt\" (UID: \"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4\") " pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.964368 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5fe4552c-e5d9-4fa1-af6f-82a94af33ce4-proxy-ca-bundles\") pod \"controller-manager-58984796d6-6dnzt\" (UID: \"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4\") " pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.964412 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8lg9\" (UniqueName: \"kubernetes.io/projected/5fe4552c-e5d9-4fa1-af6f-82a94af33ce4-kube-api-access-j8lg9\") pod \"controller-manager-58984796d6-6dnzt\" (UID: \"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4\") " pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:26 crc kubenswrapper[5014]: I1006 21:43:26.964475 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/5fe4552c-e5d9-4fa1-af6f-82a94af33ce4-serving-cert\") pod \"controller-manager-58984796d6-6dnzt\" (UID: \"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4\") " pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.065983 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9h879\" (UniqueName: \"kubernetes.io/projected/e7c54d2d-8d99-42eb-8311-a6b0d4c1995d-kube-api-access-9h879\") pod \"route-controller-manager-b55d98d7d-6vw24\" (UID: \"e7c54d2d-8d99-42eb-8311-a6b0d4c1995d\") " pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.066084 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fe4552c-e5d9-4fa1-af6f-82a94af33ce4-config\") pod \"controller-manager-58984796d6-6dnzt\" (UID: \"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4\") " pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.066114 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5fe4552c-e5d9-4fa1-af6f-82a94af33ce4-proxy-ca-bundles\") pod \"controller-manager-58984796d6-6dnzt\" (UID: \"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4\") " pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.066145 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8lg9\" (UniqueName: \"kubernetes.io/projected/5fe4552c-e5d9-4fa1-af6f-82a94af33ce4-kube-api-access-j8lg9\") pod \"controller-manager-58984796d6-6dnzt\" (UID: \"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4\") " pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.066173 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5fe4552c-e5d9-4fa1-af6f-82a94af33ce4-serving-cert\") pod \"controller-manager-58984796d6-6dnzt\" (UID: \"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4\") " pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.066210 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5fe4552c-e5d9-4fa1-af6f-82a94af33ce4-client-ca\") pod \"controller-manager-58984796d6-6dnzt\" (UID: \"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4\") " pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.066234 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e7c54d2d-8d99-42eb-8311-a6b0d4c1995d-client-ca\") pod \"route-controller-manager-b55d98d7d-6vw24\" (UID: \"e7c54d2d-8d99-42eb-8311-a6b0d4c1995d\") " pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.066266 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7c54d2d-8d99-42eb-8311-a6b0d4c1995d-config\") pod \"route-controller-manager-b55d98d7d-6vw24\" (UID: 
\"e7c54d2d-8d99-42eb-8311-a6b0d4c1995d\") " pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.066286 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7c54d2d-8d99-42eb-8311-a6b0d4c1995d-serving-cert\") pod \"route-controller-manager-b55d98d7d-6vw24\" (UID: \"e7c54d2d-8d99-42eb-8311-a6b0d4c1995d\") " pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.067334 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5fe4552c-e5d9-4fa1-af6f-82a94af33ce4-client-ca\") pod \"controller-manager-58984796d6-6dnzt\" (UID: \"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4\") " pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.067503 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e7c54d2d-8d99-42eb-8311-a6b0d4c1995d-client-ca\") pod \"route-controller-manager-b55d98d7d-6vw24\" (UID: \"e7c54d2d-8d99-42eb-8311-a6b0d4c1995d\") " pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.067768 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fe4552c-e5d9-4fa1-af6f-82a94af33ce4-config\") pod \"controller-manager-58984796d6-6dnzt\" (UID: \"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4\") " pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.068014 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5fe4552c-e5d9-4fa1-af6f-82a94af33ce4-proxy-ca-bundles\") pod \"controller-manager-58984796d6-6dnzt\" (UID: \"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4\") " pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.068178 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7c54d2d-8d99-42eb-8311-a6b0d4c1995d-config\") pod \"route-controller-manager-b55d98d7d-6vw24\" (UID: \"e7c54d2d-8d99-42eb-8311-a6b0d4c1995d\") " pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.082085 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7c54d2d-8d99-42eb-8311-a6b0d4c1995d-serving-cert\") pod \"route-controller-manager-b55d98d7d-6vw24\" (UID: \"e7c54d2d-8d99-42eb-8311-a6b0d4c1995d\") " pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.090177 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5fe4552c-e5d9-4fa1-af6f-82a94af33ce4-serving-cert\") pod \"controller-manager-58984796d6-6dnzt\" (UID: \"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4\") " pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.090867 5014 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9h879\" (UniqueName: \"kubernetes.io/projected/e7c54d2d-8d99-42eb-8311-a6b0d4c1995d-kube-api-access-9h879\") pod \"route-controller-manager-b55d98d7d-6vw24\" (UID: \"e7c54d2d-8d99-42eb-8311-a6b0d4c1995d\") " pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.097548 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8lg9\" (UniqueName: \"kubernetes.io/projected/5fe4552c-e5d9-4fa1-af6f-82a94af33ce4-kube-api-access-j8lg9\") pod \"controller-manager-58984796d6-6dnzt\" (UID: \"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4\") " pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.203009 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.233326 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.494231 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48bcee5f-3c11-4784-aa10-c5673058c7b1" path="/var/lib/kubelet/pods/48bcee5f-3c11-4784-aa10-c5673058c7b1/volumes" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.495124 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9aabe83-7840-47cc-b3d5-72b068737094" path="/var/lib/kubelet/pods/b9aabe83-7840-47cc-b3d5-72b068737094/volumes" Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.661134 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24"] Oct 06 21:43:27 crc kubenswrapper[5014]: W1006 21:43:27.670270 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode7c54d2d_8d99_42eb_8311_a6b0d4c1995d.slice/crio-92de81394ac38a8c0d70e1aa8183ed8f569902a5d6f4ef90991a00fa9d28db46 WatchSource:0}: Error finding container 92de81394ac38a8c0d70e1aa8183ed8f569902a5d6f4ef90991a00fa9d28db46: Status 404 returned error can't find the container with id 92de81394ac38a8c0d70e1aa8183ed8f569902a5d6f4ef90991a00fa9d28db46 Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.696706 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58984796d6-6dnzt"] Oct 06 21:43:27 crc kubenswrapper[5014]: W1006 21:43:27.701291 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe4552c_e5d9_4fa1_af6f_82a94af33ce4.slice/crio-6b5b773c13c6cc945616243451756db27b3788b4dcf081aa93389719ecf771ef WatchSource:0}: Error finding container 6b5b773c13c6cc945616243451756db27b3788b4dcf081aa93389719ecf771ef: Status 404 returned error can't find the container with id 6b5b773c13c6cc945616243451756db27b3788b4dcf081aa93389719ecf771ef Oct 06 21:43:27 crc kubenswrapper[5014]: I1006 21:43:27.813104 5014 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Oct 06 21:43:28 crc kubenswrapper[5014]: I1006 21:43:28.590600 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" event={"ID":"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4","Type":"ContainerStarted","Data":"ed4fae8358b0f4cea381a313c08c57dfe5dd5e46e592f7885c7bbc9619d6eef9"} Oct 06 21:43:28 crc kubenswrapper[5014]: I1006 21:43:28.591002 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" event={"ID":"5fe4552c-e5d9-4fa1-af6f-82a94af33ce4","Type":"ContainerStarted","Data":"6b5b773c13c6cc945616243451756db27b3788b4dcf081aa93389719ecf771ef"} Oct 06 21:43:28 crc kubenswrapper[5014]: I1006 21:43:28.591023 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:28 crc kubenswrapper[5014]: I1006 21:43:28.592584 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" event={"ID":"e7c54d2d-8d99-42eb-8311-a6b0d4c1995d","Type":"ContainerStarted","Data":"14fa582fb38b58c549570d3ae0a5fc82830b0e6c1108a120b4e977886c1f7f81"} Oct 06 21:43:28 crc kubenswrapper[5014]: I1006 21:43:28.592676 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" event={"ID":"e7c54d2d-8d99-42eb-8311-a6b0d4c1995d","Type":"ContainerStarted","Data":"92de81394ac38a8c0d70e1aa8183ed8f569902a5d6f4ef90991a00fa9d28db46"} Oct 06 21:43:28 crc kubenswrapper[5014]: I1006 21:43:28.592966 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:28 crc kubenswrapper[5014]: I1006 21:43:28.595704 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" Oct 06 21:43:28 crc kubenswrapper[5014]: I1006 21:43:28.599241 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" Oct 06 21:43:28 crc kubenswrapper[5014]: I1006 21:43:28.614748 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-58984796d6-6dnzt" podStartSLOduration=4.6147287089999995 podStartE2EDuration="4.614728709s" podCreationTimestamp="2025-10-06 21:43:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:43:28.610344543 +0000 UTC m=+753.903381277" watchObservedRunningTime="2025-10-06 21:43:28.614728709 +0000 UTC m=+753.907765463" Oct 06 21:43:28 crc kubenswrapper[5014]: I1006 21:43:28.628182 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-b55d98d7d-6vw24" podStartSLOduration=4.628152675 podStartE2EDuration="4.628152675s" podCreationTimestamp="2025-10-06 21:43:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:43:28.626340438 +0000 UTC m=+753.919377172" watchObservedRunningTime="2025-10-06 21:43:28.628152675 +0000 UTC m=+753.921189409" Oct 06 21:43:36 crc kubenswrapper[5014]: I1006 21:43:36.432328 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-shqhf" Oct 06 21:43:48 crc kubenswrapper[5014]: 
I1006 21:43:48.989351 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-v9d9w"] Oct 06 21:43:48 crc kubenswrapper[5014]: I1006 21:43:48.994840 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:43:49 crc kubenswrapper[5014]: I1006 21:43:49.004833 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v9d9w"] Oct 06 21:43:49 crc kubenswrapper[5014]: I1006 21:43:49.171782 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j98sd\" (UniqueName: \"kubernetes.io/projected/4e86d310-98ee-4124-8c72-80f660041829-kube-api-access-j98sd\") pod \"redhat-operators-v9d9w\" (UID: \"4e86d310-98ee-4124-8c72-80f660041829\") " pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:43:49 crc kubenswrapper[5014]: I1006 21:43:49.171823 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e86d310-98ee-4124-8c72-80f660041829-utilities\") pod \"redhat-operators-v9d9w\" (UID: \"4e86d310-98ee-4124-8c72-80f660041829\") " pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:43:49 crc kubenswrapper[5014]: I1006 21:43:49.171894 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e86d310-98ee-4124-8c72-80f660041829-catalog-content\") pod \"redhat-operators-v9d9w\" (UID: \"4e86d310-98ee-4124-8c72-80f660041829\") " pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:43:49 crc kubenswrapper[5014]: I1006 21:43:49.272767 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e86d310-98ee-4124-8c72-80f660041829-catalog-content\") pod \"redhat-operators-v9d9w\" (UID: \"4e86d310-98ee-4124-8c72-80f660041829\") " pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:43:49 crc kubenswrapper[5014]: I1006 21:43:49.272869 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j98sd\" (UniqueName: \"kubernetes.io/projected/4e86d310-98ee-4124-8c72-80f660041829-kube-api-access-j98sd\") pod \"redhat-operators-v9d9w\" (UID: \"4e86d310-98ee-4124-8c72-80f660041829\") " pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:43:49 crc kubenswrapper[5014]: I1006 21:43:49.272894 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e86d310-98ee-4124-8c72-80f660041829-utilities\") pod \"redhat-operators-v9d9w\" (UID: \"4e86d310-98ee-4124-8c72-80f660041829\") " pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:43:49 crc kubenswrapper[5014]: I1006 21:43:49.273237 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e86d310-98ee-4124-8c72-80f660041829-catalog-content\") pod \"redhat-operators-v9d9w\" (UID: \"4e86d310-98ee-4124-8c72-80f660041829\") " pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:43:49 crc kubenswrapper[5014]: I1006 21:43:49.273307 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e86d310-98ee-4124-8c72-80f660041829-utilities\") pod \"redhat-operators-v9d9w\" (UID: 
\"4e86d310-98ee-4124-8c72-80f660041829\") " pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:43:49 crc kubenswrapper[5014]: I1006 21:43:49.302339 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j98sd\" (UniqueName: \"kubernetes.io/projected/4e86d310-98ee-4124-8c72-80f660041829-kube-api-access-j98sd\") pod \"redhat-operators-v9d9w\" (UID: \"4e86d310-98ee-4124-8c72-80f660041829\") " pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:43:49 crc kubenswrapper[5014]: I1006 21:43:49.325304 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:43:49 crc kubenswrapper[5014]: I1006 21:43:49.810267 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v9d9w"] Oct 06 21:43:50 crc kubenswrapper[5014]: I1006 21:43:50.790919 5014 generic.go:334] "Generic (PLEG): container finished" podID="4e86d310-98ee-4124-8c72-80f660041829" containerID="9e088073fb45debce62eebfc0180c7fb6ad4411f0a3818fdbe9cd8074a1bb40a" exitCode=0 Oct 06 21:43:50 crc kubenswrapper[5014]: I1006 21:43:50.790999 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v9d9w" event={"ID":"4e86d310-98ee-4124-8c72-80f660041829","Type":"ContainerDied","Data":"9e088073fb45debce62eebfc0180c7fb6ad4411f0a3818fdbe9cd8074a1bb40a"} Oct 06 21:43:50 crc kubenswrapper[5014]: I1006 21:43:50.791074 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v9d9w" event={"ID":"4e86d310-98ee-4124-8c72-80f660041829","Type":"ContainerStarted","Data":"1dcac277f2ffde7a16598f7d46d28e6ed084ed69cdeba6e71106932d09f5bdb6"} Oct 06 21:43:50 crc kubenswrapper[5014]: I1006 21:43:50.820914 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l"] Oct 06 21:43:50 crc kubenswrapper[5014]: I1006 21:43:50.834533 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" Oct 06 21:43:50 crc kubenswrapper[5014]: I1006 21:43:50.840220 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Oct 06 21:43:50 crc kubenswrapper[5014]: I1006 21:43:50.853755 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l"] Oct 06 21:43:50 crc kubenswrapper[5014]: I1006 21:43:50.995502 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-util\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l\" (UID: \"7ce534c4-b596-45d1-9bf5-6573f8aa71e1\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" Oct 06 21:43:50 crc kubenswrapper[5014]: I1006 21:43:50.995587 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2kjk\" (UniqueName: \"kubernetes.io/projected/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-kube-api-access-j2kjk\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l\" (UID: \"7ce534c4-b596-45d1-9bf5-6573f8aa71e1\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" Oct 06 21:43:50 crc kubenswrapper[5014]: I1006 21:43:50.995899 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-bundle\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l\" (UID: \"7ce534c4-b596-45d1-9bf5-6573f8aa71e1\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" Oct 06 21:43:51 crc kubenswrapper[5014]: I1006 21:43:51.097437 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-bundle\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l\" (UID: \"7ce534c4-b596-45d1-9bf5-6573f8aa71e1\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" Oct 06 21:43:51 crc kubenswrapper[5014]: I1006 21:43:51.098233 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2kjk\" (UniqueName: \"kubernetes.io/projected/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-kube-api-access-j2kjk\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l\" (UID: \"7ce534c4-b596-45d1-9bf5-6573f8aa71e1\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" Oct 06 21:43:51 crc kubenswrapper[5014]: I1006 21:43:51.098295 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-util\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l\" (UID: \"7ce534c4-b596-45d1-9bf5-6573f8aa71e1\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" Oct 06 21:43:51 crc kubenswrapper[5014]: I1006 21:43:51.098778 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-bundle\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l\" (UID: \"7ce534c4-b596-45d1-9bf5-6573f8aa71e1\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" Oct 06 21:43:51 crc kubenswrapper[5014]: I1006 21:43:51.098918 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-util\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l\" (UID: \"7ce534c4-b596-45d1-9bf5-6573f8aa71e1\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" Oct 06 21:43:51 crc kubenswrapper[5014]: I1006 21:43:51.127197 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2kjk\" (UniqueName: \"kubernetes.io/projected/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-kube-api-access-j2kjk\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l\" (UID: \"7ce534c4-b596-45d1-9bf5-6573f8aa71e1\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" Oct 06 21:43:51 crc kubenswrapper[5014]: I1006 21:43:51.185486 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" Oct 06 21:43:51 crc kubenswrapper[5014]: I1006 21:43:51.646058 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l"] Oct 06 21:43:51 crc kubenswrapper[5014]: W1006 21:43:51.653605 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7ce534c4_b596_45d1_9bf5_6573f8aa71e1.slice/crio-00f183412f6c4a5317c39598014af80ef4c5c79412461f1c67be79744c3144b4 WatchSource:0}: Error finding container 00f183412f6c4a5317c39598014af80ef4c5c79412461f1c67be79744c3144b4: Status 404 returned error can't find the container with id 00f183412f6c4a5317c39598014af80ef4c5c79412461f1c67be79744c3144b4 Oct 06 21:43:51 crc kubenswrapper[5014]: I1006 21:43:51.703881 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-kb2p5" podUID="43df857e-f7f9-45e8-97e7-21adc3167678" containerName="console" containerID="cri-o://56a557245460d4db4647f6ddf232ee62107e90c59362f0e7708b594aa6ce5849" gracePeriod=15 Oct 06 21:43:51 crc kubenswrapper[5014]: I1006 21:43:51.734878 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 21:43:51 crc kubenswrapper[5014]: I1006 21:43:51.734930 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 21:43:51 crc kubenswrapper[5014]: I1006 21:43:51.799666 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v9d9w" 
event={"ID":"4e86d310-98ee-4124-8c72-80f660041829","Type":"ContainerStarted","Data":"27858a8bbe385f59d4d2bc4c01cb7e1d11eaf1c5f89209f02adf17ff03b20fbb"} Oct 06 21:43:51 crc kubenswrapper[5014]: I1006 21:43:51.800872 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" event={"ID":"7ce534c4-b596-45d1-9bf5-6573f8aa71e1","Type":"ContainerStarted","Data":"00f183412f6c4a5317c39598014af80ef4c5c79412461f1c67be79744c3144b4"} Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.238050 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-kb2p5_43df857e-f7f9-45e8-97e7-21adc3167678/console/0.log" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.238159 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.415834 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43df857e-f7f9-45e8-97e7-21adc3167678-console-oauth-config\") pod \"43df857e-f7f9-45e8-97e7-21adc3167678\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.416537 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43df857e-f7f9-45e8-97e7-21adc3167678-console-serving-cert\") pod \"43df857e-f7f9-45e8-97e7-21adc3167678\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.416662 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-service-ca\") pod \"43df857e-f7f9-45e8-97e7-21adc3167678\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.418578 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-service-ca" (OuterVolumeSpecName: "service-ca") pod "43df857e-f7f9-45e8-97e7-21adc3167678" (UID: "43df857e-f7f9-45e8-97e7-21adc3167678"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.419891 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43df857e-f7f9-45e8-97e7-21adc3167678" (UID: "43df857e-f7f9-45e8-97e7-21adc3167678"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.420030 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-trusted-ca-bundle\") pod \"43df857e-f7f9-45e8-97e7-21adc3167678\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.420164 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7nsg\" (UniqueName: \"kubernetes.io/projected/43df857e-f7f9-45e8-97e7-21adc3167678-kube-api-access-m7nsg\") pod \"43df857e-f7f9-45e8-97e7-21adc3167678\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.420917 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-console-config\") pod \"43df857e-f7f9-45e8-97e7-21adc3167678\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.421056 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-oauth-serving-cert\") pod \"43df857e-f7f9-45e8-97e7-21adc3167678\" (UID: \"43df857e-f7f9-45e8-97e7-21adc3167678\") " Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.422219 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43df857e-f7f9-45e8-97e7-21adc3167678" (UID: "43df857e-f7f9-45e8-97e7-21adc3167678"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.422522 5014 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-service-ca\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.422611 5014 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.422846 5014 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.423270 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-console-config" (OuterVolumeSpecName: "console-config") pod "43df857e-f7f9-45e8-97e7-21adc3167678" (UID: "43df857e-f7f9-45e8-97e7-21adc3167678"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.425360 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43df857e-f7f9-45e8-97e7-21adc3167678-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43df857e-f7f9-45e8-97e7-21adc3167678" (UID: "43df857e-f7f9-45e8-97e7-21adc3167678"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.425768 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43df857e-f7f9-45e8-97e7-21adc3167678-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43df857e-f7f9-45e8-97e7-21adc3167678" (UID: "43df857e-f7f9-45e8-97e7-21adc3167678"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.427168 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43df857e-f7f9-45e8-97e7-21adc3167678-kube-api-access-m7nsg" (OuterVolumeSpecName: "kube-api-access-m7nsg") pod "43df857e-f7f9-45e8-97e7-21adc3167678" (UID: "43df857e-f7f9-45e8-97e7-21adc3167678"). InnerVolumeSpecName "kube-api-access-m7nsg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.523795 5014 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43df857e-f7f9-45e8-97e7-21adc3167678-console-oauth-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.523830 5014 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43df857e-f7f9-45e8-97e7-21adc3167678-console-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.523842 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7nsg\" (UniqueName: \"kubernetes.io/projected/43df857e-f7f9-45e8-97e7-21adc3167678-kube-api-access-m7nsg\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.523857 5014 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43df857e-f7f9-45e8-97e7-21adc3167678-console-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.812385 5014 generic.go:334] "Generic (PLEG): container finished" podID="7ce534c4-b596-45d1-9bf5-6573f8aa71e1" containerID="08e0a6123342e3a02034b8557588790c60efa00a1307c89c48de378a5a230f24" exitCode=0 Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.812486 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" event={"ID":"7ce534c4-b596-45d1-9bf5-6573f8aa71e1","Type":"ContainerDied","Data":"08e0a6123342e3a02034b8557588790c60efa00a1307c89c48de378a5a230f24"} Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.815176 5014 generic.go:334] "Generic (PLEG): container finished" podID="4e86d310-98ee-4124-8c72-80f660041829" containerID="27858a8bbe385f59d4d2bc4c01cb7e1d11eaf1c5f89209f02adf17ff03b20fbb" exitCode=0 Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.815245 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-v9d9w" event={"ID":"4e86d310-98ee-4124-8c72-80f660041829","Type":"ContainerDied","Data":"27858a8bbe385f59d4d2bc4c01cb7e1d11eaf1c5f89209f02adf17ff03b20fbb"} Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.822696 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-kb2p5_43df857e-f7f9-45e8-97e7-21adc3167678/console/0.log" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.822755 5014 generic.go:334] "Generic (PLEG): container finished" podID="43df857e-f7f9-45e8-97e7-21adc3167678" containerID="56a557245460d4db4647f6ddf232ee62107e90c59362f0e7708b594aa6ce5849" exitCode=2 Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.822795 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-kb2p5" event={"ID":"43df857e-f7f9-45e8-97e7-21adc3167678","Type":"ContainerDied","Data":"56a557245460d4db4647f6ddf232ee62107e90c59362f0e7708b594aa6ce5849"} Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.822832 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-kb2p5" event={"ID":"43df857e-f7f9-45e8-97e7-21adc3167678","Type":"ContainerDied","Data":"a64cfac1ca85d19f95c45f010bd9cff708b14311ff1f5c5dc57021467f8cbd50"} Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.822858 5014 scope.go:117] "RemoveContainer" containerID="56a557245460d4db4647f6ddf232ee62107e90c59362f0e7708b594aa6ce5849" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.823021 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-kb2p5" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.856222 5014 scope.go:117] "RemoveContainer" containerID="56a557245460d4db4647f6ddf232ee62107e90c59362f0e7708b594aa6ce5849" Oct 06 21:43:52 crc kubenswrapper[5014]: E1006 21:43:52.856932 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56a557245460d4db4647f6ddf232ee62107e90c59362f0e7708b594aa6ce5849\": container with ID starting with 56a557245460d4db4647f6ddf232ee62107e90c59362f0e7708b594aa6ce5849 not found: ID does not exist" containerID="56a557245460d4db4647f6ddf232ee62107e90c59362f0e7708b594aa6ce5849" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.856962 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56a557245460d4db4647f6ddf232ee62107e90c59362f0e7708b594aa6ce5849"} err="failed to get container status \"56a557245460d4db4647f6ddf232ee62107e90c59362f0e7708b594aa6ce5849\": rpc error: code = NotFound desc = could not find container \"56a557245460d4db4647f6ddf232ee62107e90c59362f0e7708b594aa6ce5849\": container with ID starting with 56a557245460d4db4647f6ddf232ee62107e90c59362f0e7708b594aa6ce5849 not found: ID does not exist" Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.883554 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-kb2p5"] Oct 06 21:43:52 crc kubenswrapper[5014]: I1006 21:43:52.886935 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-kb2p5"] Oct 06 21:43:53 crc kubenswrapper[5014]: I1006 21:43:53.491641 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43df857e-f7f9-45e8-97e7-21adc3167678" path="/var/lib/kubelet/pods/43df857e-f7f9-45e8-97e7-21adc3167678/volumes" Oct 06 21:43:53 crc kubenswrapper[5014]: I1006 
21:43:53.834006 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v9d9w" event={"ID":"4e86d310-98ee-4124-8c72-80f660041829","Type":"ContainerStarted","Data":"02388d0da094e7bd896e23c63093a4ef35b552799d130e6e8114ef3769de85ef"} Oct 06 21:43:54 crc kubenswrapper[5014]: I1006 21:43:54.847875 5014 generic.go:334] "Generic (PLEG): container finished" podID="7ce534c4-b596-45d1-9bf5-6573f8aa71e1" containerID="df138ec3496e63f7409e16b2a66b81016bf7a18ffb86b7bba9566304f5ff89c5" exitCode=0 Oct 06 21:43:54 crc kubenswrapper[5014]: I1006 21:43:54.847968 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" event={"ID":"7ce534c4-b596-45d1-9bf5-6573f8aa71e1","Type":"ContainerDied","Data":"df138ec3496e63f7409e16b2a66b81016bf7a18ffb86b7bba9566304f5ff89c5"} Oct 06 21:43:54 crc kubenswrapper[5014]: I1006 21:43:54.870923 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-v9d9w" podStartSLOduration=4.418959658 podStartE2EDuration="6.870878117s" podCreationTimestamp="2025-10-06 21:43:48 +0000 UTC" firstStartedPulling="2025-10-06 21:43:50.79326917 +0000 UTC m=+776.086305944" lastFinishedPulling="2025-10-06 21:43:53.245187619 +0000 UTC m=+778.538224403" observedRunningTime="2025-10-06 21:43:53.8647908 +0000 UTC m=+779.157827594" watchObservedRunningTime="2025-10-06 21:43:54.870878117 +0000 UTC m=+780.163914861" Oct 06 21:43:55 crc kubenswrapper[5014]: I1006 21:43:55.867125 5014 generic.go:334] "Generic (PLEG): container finished" podID="7ce534c4-b596-45d1-9bf5-6573f8aa71e1" containerID="23394b0de3d1d976d47cf53a548697c0ce0bdc826e8908fef531d3c1a2abca62" exitCode=0 Oct 06 21:43:55 crc kubenswrapper[5014]: I1006 21:43:55.867325 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" event={"ID":"7ce534c4-b596-45d1-9bf5-6573f8aa71e1","Type":"ContainerDied","Data":"23394b0de3d1d976d47cf53a548697c0ce0bdc826e8908fef531d3c1a2abca62"} Oct 06 21:43:57 crc kubenswrapper[5014]: I1006 21:43:57.326788 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" Oct 06 21:43:57 crc kubenswrapper[5014]: I1006 21:43:57.389145 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2kjk\" (UniqueName: \"kubernetes.io/projected/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-kube-api-access-j2kjk\") pod \"7ce534c4-b596-45d1-9bf5-6573f8aa71e1\" (UID: \"7ce534c4-b596-45d1-9bf5-6573f8aa71e1\") " Oct 06 21:43:57 crc kubenswrapper[5014]: I1006 21:43:57.389243 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-bundle\") pod \"7ce534c4-b596-45d1-9bf5-6573f8aa71e1\" (UID: \"7ce534c4-b596-45d1-9bf5-6573f8aa71e1\") " Oct 06 21:43:57 crc kubenswrapper[5014]: I1006 21:43:57.389348 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-util\") pod \"7ce534c4-b596-45d1-9bf5-6573f8aa71e1\" (UID: \"7ce534c4-b596-45d1-9bf5-6573f8aa71e1\") " Oct 06 21:43:57 crc kubenswrapper[5014]: I1006 21:43:57.390374 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-bundle" (OuterVolumeSpecName: "bundle") pod "7ce534c4-b596-45d1-9bf5-6573f8aa71e1" (UID: "7ce534c4-b596-45d1-9bf5-6573f8aa71e1"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:43:57 crc kubenswrapper[5014]: I1006 21:43:57.395079 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-kube-api-access-j2kjk" (OuterVolumeSpecName: "kube-api-access-j2kjk") pod "7ce534c4-b596-45d1-9bf5-6573f8aa71e1" (UID: "7ce534c4-b596-45d1-9bf5-6573f8aa71e1"). InnerVolumeSpecName "kube-api-access-j2kjk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:43:57 crc kubenswrapper[5014]: I1006 21:43:57.490822 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2kjk\" (UniqueName: \"kubernetes.io/projected/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-kube-api-access-j2kjk\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:57 crc kubenswrapper[5014]: I1006 21:43:57.490869 5014 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:57 crc kubenswrapper[5014]: I1006 21:43:57.885962 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" event={"ID":"7ce534c4-b596-45d1-9bf5-6573f8aa71e1","Type":"ContainerDied","Data":"00f183412f6c4a5317c39598014af80ef4c5c79412461f1c67be79744c3144b4"} Oct 06 21:43:57 crc kubenswrapper[5014]: I1006 21:43:57.886042 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00f183412f6c4a5317c39598014af80ef4c5c79412461f1c67be79744c3144b4" Oct 06 21:43:57 crc kubenswrapper[5014]: I1006 21:43:57.886121 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l" Oct 06 21:43:58 crc kubenswrapper[5014]: I1006 21:43:58.381263 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-util" (OuterVolumeSpecName: "util") pod "7ce534c4-b596-45d1-9bf5-6573f8aa71e1" (UID: "7ce534c4-b596-45d1-9bf5-6573f8aa71e1"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:43:58 crc kubenswrapper[5014]: I1006 21:43:58.406740 5014 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7ce534c4-b596-45d1-9bf5-6573f8aa71e1-util\") on node \"crc\" DevicePath \"\"" Oct 06 21:43:59 crc kubenswrapper[5014]: I1006 21:43:59.326496 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:43:59 crc kubenswrapper[5014]: I1006 21:43:59.327147 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:43:59 crc kubenswrapper[5014]: I1006 21:43:59.397094 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:43:59 crc kubenswrapper[5014]: I1006 21:43:59.951090 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:44:01 crc kubenswrapper[5014]: I1006 21:44:01.744937 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v9d9w"] Oct 06 21:44:02 crc kubenswrapper[5014]: I1006 21:44:02.915811 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-v9d9w" podUID="4e86d310-98ee-4124-8c72-80f660041829" containerName="registry-server" containerID="cri-o://02388d0da094e7bd896e23c63093a4ef35b552799d130e6e8114ef3769de85ef" gracePeriod=2 Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.405835 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.414964 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j98sd\" (UniqueName: \"kubernetes.io/projected/4e86d310-98ee-4124-8c72-80f660041829-kube-api-access-j98sd\") pod \"4e86d310-98ee-4124-8c72-80f660041829\" (UID: \"4e86d310-98ee-4124-8c72-80f660041829\") " Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.414998 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e86d310-98ee-4124-8c72-80f660041829-utilities\") pod \"4e86d310-98ee-4124-8c72-80f660041829\" (UID: \"4e86d310-98ee-4124-8c72-80f660041829\") " Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.415035 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e86d310-98ee-4124-8c72-80f660041829-catalog-content\") pod \"4e86d310-98ee-4124-8c72-80f660041829\" (UID: \"4e86d310-98ee-4124-8c72-80f660041829\") " Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.416839 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e86d310-98ee-4124-8c72-80f660041829-utilities" (OuterVolumeSpecName: "utilities") pod "4e86d310-98ee-4124-8c72-80f660041829" (UID: "4e86d310-98ee-4124-8c72-80f660041829"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.424017 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e86d310-98ee-4124-8c72-80f660041829-kube-api-access-j98sd" (OuterVolumeSpecName: "kube-api-access-j98sd") pod "4e86d310-98ee-4124-8c72-80f660041829" (UID: "4e86d310-98ee-4124-8c72-80f660041829"). InnerVolumeSpecName "kube-api-access-j98sd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.500344 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e86d310-98ee-4124-8c72-80f660041829-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4e86d310-98ee-4124-8c72-80f660041829" (UID: "4e86d310-98ee-4124-8c72-80f660041829"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.516799 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j98sd\" (UniqueName: \"kubernetes.io/projected/4e86d310-98ee-4124-8c72-80f660041829-kube-api-access-j98sd\") on node \"crc\" DevicePath \"\"" Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.516921 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e86d310-98ee-4124-8c72-80f660041829-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.516943 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e86d310-98ee-4124-8c72-80f660041829-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.931658 5014 generic.go:334] "Generic (PLEG): container finished" podID="4e86d310-98ee-4124-8c72-80f660041829" containerID="02388d0da094e7bd896e23c63093a4ef35b552799d130e6e8114ef3769de85ef" exitCode=0 Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.931720 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v9d9w" Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.931717 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v9d9w" event={"ID":"4e86d310-98ee-4124-8c72-80f660041829","Type":"ContainerDied","Data":"02388d0da094e7bd896e23c63093a4ef35b552799d130e6e8114ef3769de85ef"} Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.931873 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v9d9w" event={"ID":"4e86d310-98ee-4124-8c72-80f660041829","Type":"ContainerDied","Data":"1dcac277f2ffde7a16598f7d46d28e6ed084ed69cdeba6e71106932d09f5bdb6"} Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.931908 5014 scope.go:117] "RemoveContainer" containerID="02388d0da094e7bd896e23c63093a4ef35b552799d130e6e8114ef3769de85ef" Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.955442 5014 scope.go:117] "RemoveContainer" containerID="27858a8bbe385f59d4d2bc4c01cb7e1d11eaf1c5f89209f02adf17ff03b20fbb" Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.970593 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v9d9w"] Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.979381 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-v9d9w"] Oct 06 21:44:04 crc kubenswrapper[5014]: I1006 21:44:04.996857 5014 scope.go:117] "RemoveContainer" containerID="9e088073fb45debce62eebfc0180c7fb6ad4411f0a3818fdbe9cd8074a1bb40a" Oct 06 21:44:05 crc kubenswrapper[5014]: I1006 21:44:05.023506 5014 scope.go:117] "RemoveContainer" containerID="02388d0da094e7bd896e23c63093a4ef35b552799d130e6e8114ef3769de85ef" Oct 06 21:44:05 crc kubenswrapper[5014]: E1006 21:44:05.024162 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02388d0da094e7bd896e23c63093a4ef35b552799d130e6e8114ef3769de85ef\": container with ID starting with 02388d0da094e7bd896e23c63093a4ef35b552799d130e6e8114ef3769de85ef not found: ID does not exist" containerID="02388d0da094e7bd896e23c63093a4ef35b552799d130e6e8114ef3769de85ef" Oct 06 21:44:05 crc kubenswrapper[5014]: I1006 21:44:05.024229 5014 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02388d0da094e7bd896e23c63093a4ef35b552799d130e6e8114ef3769de85ef"} err="failed to get container status \"02388d0da094e7bd896e23c63093a4ef35b552799d130e6e8114ef3769de85ef\": rpc error: code = NotFound desc = could not find container \"02388d0da094e7bd896e23c63093a4ef35b552799d130e6e8114ef3769de85ef\": container with ID starting with 02388d0da094e7bd896e23c63093a4ef35b552799d130e6e8114ef3769de85ef not found: ID does not exist" Oct 06 21:44:05 crc kubenswrapper[5014]: I1006 21:44:05.024270 5014 scope.go:117] "RemoveContainer" containerID="27858a8bbe385f59d4d2bc4c01cb7e1d11eaf1c5f89209f02adf17ff03b20fbb" Oct 06 21:44:05 crc kubenswrapper[5014]: E1006 21:44:05.024819 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27858a8bbe385f59d4d2bc4c01cb7e1d11eaf1c5f89209f02adf17ff03b20fbb\": container with ID starting with 27858a8bbe385f59d4d2bc4c01cb7e1d11eaf1c5f89209f02adf17ff03b20fbb not found: ID does not exist" containerID="27858a8bbe385f59d4d2bc4c01cb7e1d11eaf1c5f89209f02adf17ff03b20fbb" Oct 06 21:44:05 crc kubenswrapper[5014]: I1006 21:44:05.024883 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27858a8bbe385f59d4d2bc4c01cb7e1d11eaf1c5f89209f02adf17ff03b20fbb"} err="failed to get container status \"27858a8bbe385f59d4d2bc4c01cb7e1d11eaf1c5f89209f02adf17ff03b20fbb\": rpc error: code = NotFound desc = could not find container \"27858a8bbe385f59d4d2bc4c01cb7e1d11eaf1c5f89209f02adf17ff03b20fbb\": container with ID starting with 27858a8bbe385f59d4d2bc4c01cb7e1d11eaf1c5f89209f02adf17ff03b20fbb not found: ID does not exist" Oct 06 21:44:05 crc kubenswrapper[5014]: I1006 21:44:05.024937 5014 scope.go:117] "RemoveContainer" containerID="9e088073fb45debce62eebfc0180c7fb6ad4411f0a3818fdbe9cd8074a1bb40a" Oct 06 21:44:05 crc kubenswrapper[5014]: E1006 21:44:05.025431 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e088073fb45debce62eebfc0180c7fb6ad4411f0a3818fdbe9cd8074a1bb40a\": container with ID starting with 9e088073fb45debce62eebfc0180c7fb6ad4411f0a3818fdbe9cd8074a1bb40a not found: ID does not exist" containerID="9e088073fb45debce62eebfc0180c7fb6ad4411f0a3818fdbe9cd8074a1bb40a" Oct 06 21:44:05 crc kubenswrapper[5014]: I1006 21:44:05.025453 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e088073fb45debce62eebfc0180c7fb6ad4411f0a3818fdbe9cd8074a1bb40a"} err="failed to get container status \"9e088073fb45debce62eebfc0180c7fb6ad4411f0a3818fdbe9cd8074a1bb40a\": rpc error: code = NotFound desc = could not find container \"9e088073fb45debce62eebfc0180c7fb6ad4411f0a3818fdbe9cd8074a1bb40a\": container with ID starting with 9e088073fb45debce62eebfc0180c7fb6ad4411f0a3818fdbe9cd8074a1bb40a not found: ID does not exist" Oct 06 21:44:05 crc kubenswrapper[5014]: I1006 21:44:05.491699 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e86d310-98ee-4124-8c72-80f660041829" path="/var/lib/kubelet/pods/4e86d310-98ee-4124-8c72-80f660041829/volumes" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.346039 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz"] Oct 06 21:44:06 crc kubenswrapper[5014]: E1006 21:44:06.346557 5014 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="43df857e-f7f9-45e8-97e7-21adc3167678" containerName="console" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.346568 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="43df857e-f7f9-45e8-97e7-21adc3167678" containerName="console" Oct 06 21:44:06 crc kubenswrapper[5014]: E1006 21:44:06.346583 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ce534c4-b596-45d1-9bf5-6573f8aa71e1" containerName="pull" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.346589 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ce534c4-b596-45d1-9bf5-6573f8aa71e1" containerName="pull" Oct 06 21:44:06 crc kubenswrapper[5014]: E1006 21:44:06.346601 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e86d310-98ee-4124-8c72-80f660041829" containerName="registry-server" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.346608 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e86d310-98ee-4124-8c72-80f660041829" containerName="registry-server" Oct 06 21:44:06 crc kubenswrapper[5014]: E1006 21:44:06.346638 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e86d310-98ee-4124-8c72-80f660041829" containerName="extract-content" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.346646 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e86d310-98ee-4124-8c72-80f660041829" containerName="extract-content" Oct 06 21:44:06 crc kubenswrapper[5014]: E1006 21:44:06.346656 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ce534c4-b596-45d1-9bf5-6573f8aa71e1" containerName="extract" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.346661 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ce534c4-b596-45d1-9bf5-6573f8aa71e1" containerName="extract" Oct 06 21:44:06 crc kubenswrapper[5014]: E1006 21:44:06.346672 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ce534c4-b596-45d1-9bf5-6573f8aa71e1" containerName="util" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.346677 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ce534c4-b596-45d1-9bf5-6573f8aa71e1" containerName="util" Oct 06 21:44:06 crc kubenswrapper[5014]: E1006 21:44:06.346685 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e86d310-98ee-4124-8c72-80f660041829" containerName="extract-utilities" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.346691 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e86d310-98ee-4124-8c72-80f660041829" containerName="extract-utilities" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.346818 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e86d310-98ee-4124-8c72-80f660041829" containerName="registry-server" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.346830 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ce534c4-b596-45d1-9bf5-6573f8aa71e1" containerName="extract" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.346838 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="43df857e-f7f9-45e8-97e7-21adc3167678" containerName="console" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.347236 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.349018 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.349249 5014 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.349526 5014 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-dxqsp" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.349685 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.349824 5014 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.359306 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz"] Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.447750 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ad5a4e1b-2c12-4166-8be6-0ffce1256bf3-webhook-cert\") pod \"metallb-operator-controller-manager-6d98768845-hrhnz\" (UID: \"ad5a4e1b-2c12-4166-8be6-0ffce1256bf3\") " pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.447814 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpdhj\" (UniqueName: \"kubernetes.io/projected/ad5a4e1b-2c12-4166-8be6-0ffce1256bf3-kube-api-access-tpdhj\") pod \"metallb-operator-controller-manager-6d98768845-hrhnz\" (UID: \"ad5a4e1b-2c12-4166-8be6-0ffce1256bf3\") " pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.447838 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ad5a4e1b-2c12-4166-8be6-0ffce1256bf3-apiservice-cert\") pod \"metallb-operator-controller-manager-6d98768845-hrhnz\" (UID: \"ad5a4e1b-2c12-4166-8be6-0ffce1256bf3\") " pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.548475 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ad5a4e1b-2c12-4166-8be6-0ffce1256bf3-webhook-cert\") pod \"metallb-operator-controller-manager-6d98768845-hrhnz\" (UID: \"ad5a4e1b-2c12-4166-8be6-0ffce1256bf3\") " pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.548547 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpdhj\" (UniqueName: \"kubernetes.io/projected/ad5a4e1b-2c12-4166-8be6-0ffce1256bf3-kube-api-access-tpdhj\") pod \"metallb-operator-controller-manager-6d98768845-hrhnz\" (UID: \"ad5a4e1b-2c12-4166-8be6-0ffce1256bf3\") " pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.548569 5014 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ad5a4e1b-2c12-4166-8be6-0ffce1256bf3-apiservice-cert\") pod \"metallb-operator-controller-manager-6d98768845-hrhnz\" (UID: \"ad5a4e1b-2c12-4166-8be6-0ffce1256bf3\") " pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.557383 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ad5a4e1b-2c12-4166-8be6-0ffce1256bf3-apiservice-cert\") pod \"metallb-operator-controller-manager-6d98768845-hrhnz\" (UID: \"ad5a4e1b-2c12-4166-8be6-0ffce1256bf3\") " pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.567741 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ad5a4e1b-2c12-4166-8be6-0ffce1256bf3-webhook-cert\") pod \"metallb-operator-controller-manager-6d98768845-hrhnz\" (UID: \"ad5a4e1b-2c12-4166-8be6-0ffce1256bf3\") " pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.581368 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpdhj\" (UniqueName: \"kubernetes.io/projected/ad5a4e1b-2c12-4166-8be6-0ffce1256bf3-kube-api-access-tpdhj\") pod \"metallb-operator-controller-manager-6d98768845-hrhnz\" (UID: \"ad5a4e1b-2c12-4166-8be6-0ffce1256bf3\") " pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.664136 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.806479 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq"] Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.810192 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.813970 5014 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.814558 5014 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-s66td" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.814689 5014 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.819270 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq"] Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.955234 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f8acd885-8a21-4e3e-acd9-acb8d65202d0-webhook-cert\") pod \"metallb-operator-webhook-server-6d84c4b8d5-p9mhq\" (UID: \"f8acd885-8a21-4e3e-acd9-acb8d65202d0\") " pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.955288 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frxbl\" (UniqueName: \"kubernetes.io/projected/f8acd885-8a21-4e3e-acd9-acb8d65202d0-kube-api-access-frxbl\") pod \"metallb-operator-webhook-server-6d84c4b8d5-p9mhq\" (UID: \"f8acd885-8a21-4e3e-acd9-acb8d65202d0\") " pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" Oct 06 21:44:06 crc kubenswrapper[5014]: I1006 21:44:06.955336 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f8acd885-8a21-4e3e-acd9-acb8d65202d0-apiservice-cert\") pod \"metallb-operator-webhook-server-6d84c4b8d5-p9mhq\" (UID: \"f8acd885-8a21-4e3e-acd9-acb8d65202d0\") " pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" Oct 06 21:44:07 crc kubenswrapper[5014]: I1006 21:44:07.057108 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f8acd885-8a21-4e3e-acd9-acb8d65202d0-webhook-cert\") pod \"metallb-operator-webhook-server-6d84c4b8d5-p9mhq\" (UID: \"f8acd885-8a21-4e3e-acd9-acb8d65202d0\") " pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" Oct 06 21:44:07 crc kubenswrapper[5014]: I1006 21:44:07.057205 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frxbl\" (UniqueName: \"kubernetes.io/projected/f8acd885-8a21-4e3e-acd9-acb8d65202d0-kube-api-access-frxbl\") pod \"metallb-operator-webhook-server-6d84c4b8d5-p9mhq\" (UID: \"f8acd885-8a21-4e3e-acd9-acb8d65202d0\") " pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" Oct 06 21:44:07 crc kubenswrapper[5014]: I1006 21:44:07.057346 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f8acd885-8a21-4e3e-acd9-acb8d65202d0-apiservice-cert\") pod \"metallb-operator-webhook-server-6d84c4b8d5-p9mhq\" (UID: \"f8acd885-8a21-4e3e-acd9-acb8d65202d0\") " pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" Oct 06 21:44:07 crc kubenswrapper[5014]: I1006 
21:44:07.061453 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f8acd885-8a21-4e3e-acd9-acb8d65202d0-apiservice-cert\") pod \"metallb-operator-webhook-server-6d84c4b8d5-p9mhq\" (UID: \"f8acd885-8a21-4e3e-acd9-acb8d65202d0\") " pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" Oct 06 21:44:07 crc kubenswrapper[5014]: I1006 21:44:07.062329 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f8acd885-8a21-4e3e-acd9-acb8d65202d0-webhook-cert\") pod \"metallb-operator-webhook-server-6d84c4b8d5-p9mhq\" (UID: \"f8acd885-8a21-4e3e-acd9-acb8d65202d0\") " pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" Oct 06 21:44:07 crc kubenswrapper[5014]: I1006 21:44:07.076473 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frxbl\" (UniqueName: \"kubernetes.io/projected/f8acd885-8a21-4e3e-acd9-acb8d65202d0-kube-api-access-frxbl\") pod \"metallb-operator-webhook-server-6d84c4b8d5-p9mhq\" (UID: \"f8acd885-8a21-4e3e-acd9-acb8d65202d0\") " pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" Oct 06 21:44:07 crc kubenswrapper[5014]: I1006 21:44:07.107721 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz"] Oct 06 21:44:07 crc kubenswrapper[5014]: W1006 21:44:07.120093 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad5a4e1b_2c12_4166_8be6_0ffce1256bf3.slice/crio-6f9e572f1443123bbcbae8d3a8fb89817eac55b8c3a806e179a55eb968d324df WatchSource:0}: Error finding container 6f9e572f1443123bbcbae8d3a8fb89817eac55b8c3a806e179a55eb968d324df: Status 404 returned error can't find the container with id 6f9e572f1443123bbcbae8d3a8fb89817eac55b8c3a806e179a55eb968d324df Oct 06 21:44:07 crc kubenswrapper[5014]: I1006 21:44:07.142326 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" Oct 06 21:44:07 crc kubenswrapper[5014]: I1006 21:44:07.538383 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq"] Oct 06 21:44:07 crc kubenswrapper[5014]: W1006 21:44:07.549102 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8acd885_8a21_4e3e_acd9_acb8d65202d0.slice/crio-e92e1e334bf6a238d75e77393e95202724611cc24cf1acbdfed8b315d71854be WatchSource:0}: Error finding container e92e1e334bf6a238d75e77393e95202724611cc24cf1acbdfed8b315d71854be: Status 404 returned error can't find the container with id e92e1e334bf6a238d75e77393e95202724611cc24cf1acbdfed8b315d71854be Oct 06 21:44:07 crc kubenswrapper[5014]: I1006 21:44:07.956351 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" event={"ID":"ad5a4e1b-2c12-4166-8be6-0ffce1256bf3","Type":"ContainerStarted","Data":"6f9e572f1443123bbcbae8d3a8fb89817eac55b8c3a806e179a55eb968d324df"} Oct 06 21:44:07 crc kubenswrapper[5014]: I1006 21:44:07.957943 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" event={"ID":"f8acd885-8a21-4e3e-acd9-acb8d65202d0","Type":"ContainerStarted","Data":"e92e1e334bf6a238d75e77393e95202724611cc24cf1acbdfed8b315d71854be"} Oct 06 21:44:09 crc kubenswrapper[5014]: I1006 21:44:09.974234 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" event={"ID":"ad5a4e1b-2c12-4166-8be6-0ffce1256bf3","Type":"ContainerStarted","Data":"241965fd1f1aa9c06072748c052974bd9a4e46f8ca85a16804102bef9db47b33"} Oct 06 21:44:09 crc kubenswrapper[5014]: I1006 21:44:09.974597 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" Oct 06 21:44:09 crc kubenswrapper[5014]: I1006 21:44:09.993571 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" podStartSLOduration=1.580500682 podStartE2EDuration="3.993552693s" podCreationTimestamp="2025-10-06 21:44:06 +0000 UTC" firstStartedPulling="2025-10-06 21:44:07.124038824 +0000 UTC m=+792.417075558" lastFinishedPulling="2025-10-06 21:44:09.537090835 +0000 UTC m=+794.830127569" observedRunningTime="2025-10-06 21:44:09.989732862 +0000 UTC m=+795.282769616" watchObservedRunningTime="2025-10-06 21:44:09.993552693 +0000 UTC m=+795.286589437" Oct 06 21:44:11 crc kubenswrapper[5014]: I1006 21:44:11.657401 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-q4g5k"] Oct 06 21:44:11 crc kubenswrapper[5014]: I1006 21:44:11.661154 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:11 crc kubenswrapper[5014]: I1006 21:44:11.668757 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-q4g5k"] Oct 06 21:44:11 crc kubenswrapper[5014]: I1006 21:44:11.738784 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a5d0729-830a-41aa-bcb6-392abe7cbe14-catalog-content\") pod \"redhat-marketplace-q4g5k\" (UID: \"7a5d0729-830a-41aa-bcb6-392abe7cbe14\") " pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:11 crc kubenswrapper[5014]: I1006 21:44:11.738831 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a5d0729-830a-41aa-bcb6-392abe7cbe14-utilities\") pod \"redhat-marketplace-q4g5k\" (UID: \"7a5d0729-830a-41aa-bcb6-392abe7cbe14\") " pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:11 crc kubenswrapper[5014]: I1006 21:44:11.738848 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7q64z\" (UniqueName: \"kubernetes.io/projected/7a5d0729-830a-41aa-bcb6-392abe7cbe14-kube-api-access-7q64z\") pod \"redhat-marketplace-q4g5k\" (UID: \"7a5d0729-830a-41aa-bcb6-392abe7cbe14\") " pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:11 crc kubenswrapper[5014]: I1006 21:44:11.840515 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7q64z\" (UniqueName: \"kubernetes.io/projected/7a5d0729-830a-41aa-bcb6-392abe7cbe14-kube-api-access-7q64z\") pod \"redhat-marketplace-q4g5k\" (UID: \"7a5d0729-830a-41aa-bcb6-392abe7cbe14\") " pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:11 crc kubenswrapper[5014]: I1006 21:44:11.840679 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a5d0729-830a-41aa-bcb6-392abe7cbe14-catalog-content\") pod \"redhat-marketplace-q4g5k\" (UID: \"7a5d0729-830a-41aa-bcb6-392abe7cbe14\") " pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:11 crc kubenswrapper[5014]: I1006 21:44:11.840729 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a5d0729-830a-41aa-bcb6-392abe7cbe14-utilities\") pod \"redhat-marketplace-q4g5k\" (UID: \"7a5d0729-830a-41aa-bcb6-392abe7cbe14\") " pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:11 crc kubenswrapper[5014]: I1006 21:44:11.841236 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a5d0729-830a-41aa-bcb6-392abe7cbe14-catalog-content\") pod \"redhat-marketplace-q4g5k\" (UID: \"7a5d0729-830a-41aa-bcb6-392abe7cbe14\") " pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:11 crc kubenswrapper[5014]: I1006 21:44:11.841321 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a5d0729-830a-41aa-bcb6-392abe7cbe14-utilities\") pod \"redhat-marketplace-q4g5k\" (UID: \"7a5d0729-830a-41aa-bcb6-392abe7cbe14\") " pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:11 crc kubenswrapper[5014]: I1006 21:44:11.864575 5014 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-7q64z\" (UniqueName: \"kubernetes.io/projected/7a5d0729-830a-41aa-bcb6-392abe7cbe14-kube-api-access-7q64z\") pod \"redhat-marketplace-q4g5k\" (UID: \"7a5d0729-830a-41aa-bcb6-392abe7cbe14\") " pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:11 crc kubenswrapper[5014]: I1006 21:44:11.987301 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:11 crc kubenswrapper[5014]: I1006 21:44:11.987548 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" event={"ID":"f8acd885-8a21-4e3e-acd9-acb8d65202d0","Type":"ContainerStarted","Data":"4fec99253d11a6ec42e3d802872e97ba52a4184efbe36cced35553871cc7bd11"} Oct 06 21:44:11 crc kubenswrapper[5014]: I1006 21:44:11.987923 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" Oct 06 21:44:12 crc kubenswrapper[5014]: I1006 21:44:12.416008 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" podStartSLOduration=2.320219627 podStartE2EDuration="6.415982679s" podCreationTimestamp="2025-10-06 21:44:06 +0000 UTC" firstStartedPulling="2025-10-06 21:44:07.552687525 +0000 UTC m=+792.845724259" lastFinishedPulling="2025-10-06 21:44:11.648450577 +0000 UTC m=+796.941487311" observedRunningTime="2025-10-06 21:44:12.005415828 +0000 UTC m=+797.298452552" watchObservedRunningTime="2025-10-06 21:44:12.415982679 +0000 UTC m=+797.709019443" Oct 06 21:44:12 crc kubenswrapper[5014]: I1006 21:44:12.419276 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-q4g5k"] Oct 06 21:44:13 crc kubenswrapper[5014]: I1006 21:44:12.999572 5014 generic.go:334] "Generic (PLEG): container finished" podID="7a5d0729-830a-41aa-bcb6-392abe7cbe14" containerID="fc5eb58e910f668dba02f21eba7c75590d1e2818583e127862aa7e99ac341afd" exitCode=0 Oct 06 21:44:13 crc kubenswrapper[5014]: I1006 21:44:12.999686 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q4g5k" event={"ID":"7a5d0729-830a-41aa-bcb6-392abe7cbe14","Type":"ContainerDied","Data":"fc5eb58e910f668dba02f21eba7c75590d1e2818583e127862aa7e99ac341afd"} Oct 06 21:44:13 crc kubenswrapper[5014]: I1006 21:44:13.000154 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q4g5k" event={"ID":"7a5d0729-830a-41aa-bcb6-392abe7cbe14","Type":"ContainerStarted","Data":"fa4b7970281f776f089a80c3d44209bfa9317c46e0d92385f5668aef2c39a7f3"} Oct 06 21:44:14 crc kubenswrapper[5014]: I1006 21:44:14.011059 5014 generic.go:334] "Generic (PLEG): container finished" podID="7a5d0729-830a-41aa-bcb6-392abe7cbe14" containerID="79c45bd87fdc6c37bf08aa9672d6f442e06b25e75ed4a37f1770a02e5d2b0a70" exitCode=0 Oct 06 21:44:14 crc kubenswrapper[5014]: I1006 21:44:14.011147 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q4g5k" event={"ID":"7a5d0729-830a-41aa-bcb6-392abe7cbe14","Type":"ContainerDied","Data":"79c45bd87fdc6c37bf08aa9672d6f442e06b25e75ed4a37f1770a02e5d2b0a70"} Oct 06 21:44:15 crc kubenswrapper[5014]: I1006 21:44:15.018321 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q4g5k" 
event={"ID":"7a5d0729-830a-41aa-bcb6-392abe7cbe14","Type":"ContainerStarted","Data":"4a77bb2f1614c1b425e262c903e1e97ca1ad63079b2200dbb6645b30c4284a6f"} Oct 06 21:44:15 crc kubenswrapper[5014]: I1006 21:44:15.031403 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-q4g5k" podStartSLOduration=2.583940035 podStartE2EDuration="4.031386519s" podCreationTimestamp="2025-10-06 21:44:11 +0000 UTC" firstStartedPulling="2025-10-06 21:44:13.001672981 +0000 UTC m=+798.294709755" lastFinishedPulling="2025-10-06 21:44:14.449119465 +0000 UTC m=+799.742156239" observedRunningTime="2025-10-06 21:44:15.030917614 +0000 UTC m=+800.323954348" watchObservedRunningTime="2025-10-06 21:44:15.031386519 +0000 UTC m=+800.324423253" Oct 06 21:44:21 crc kubenswrapper[5014]: I1006 21:44:21.735345 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 21:44:21 crc kubenswrapper[5014]: I1006 21:44:21.735740 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 21:44:21 crc kubenswrapper[5014]: I1006 21:44:21.735791 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:44:21 crc kubenswrapper[5014]: I1006 21:44:21.736357 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5d102177c0ce6793970da1b086882b162bbee28f8415589d9583b1584188a7df"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 06 21:44:21 crc kubenswrapper[5014]: I1006 21:44:21.736406 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://5d102177c0ce6793970da1b086882b162bbee28f8415589d9583b1584188a7df" gracePeriod=600 Oct 06 21:44:21 crc kubenswrapper[5014]: I1006 21:44:21.987804 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:21 crc kubenswrapper[5014]: I1006 21:44:21.987928 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:22 crc kubenswrapper[5014]: I1006 21:44:22.046871 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:22 crc kubenswrapper[5014]: I1006 21:44:22.099278 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="5d102177c0ce6793970da1b086882b162bbee28f8415589d9583b1584188a7df" exitCode=0 Oct 06 21:44:22 crc kubenswrapper[5014]: I1006 21:44:22.099475 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"5d102177c0ce6793970da1b086882b162bbee28f8415589d9583b1584188a7df"} Oct 06 21:44:22 crc kubenswrapper[5014]: I1006 21:44:22.100300 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"067aa2b7990476eda8f037f84b90fe4366def589071101170672b653a6809121"} Oct 06 21:44:22 crc kubenswrapper[5014]: I1006 21:44:22.100356 5014 scope.go:117] "RemoveContainer" containerID="2875d948827114f05dc24f63c808bf8cba2bc805a548fae859881eca3487971f" Oct 06 21:44:22 crc kubenswrapper[5014]: I1006 21:44:22.145399 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:22 crc kubenswrapper[5014]: I1006 21:44:22.956063 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jjp88"] Oct 06 21:44:22 crc kubenswrapper[5014]: I1006 21:44:22.957944 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:22 crc kubenswrapper[5014]: I1006 21:44:22.973644 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jjp88"] Oct 06 21:44:23 crc kubenswrapper[5014]: I1006 21:44:23.093193 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcqtq\" (UniqueName: \"kubernetes.io/projected/ab6482c9-45bf-4770-8802-200ccb1732f9-kube-api-access-gcqtq\") pod \"certified-operators-jjp88\" (UID: \"ab6482c9-45bf-4770-8802-200ccb1732f9\") " pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:23 crc kubenswrapper[5014]: I1006 21:44:23.093833 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab6482c9-45bf-4770-8802-200ccb1732f9-utilities\") pod \"certified-operators-jjp88\" (UID: \"ab6482c9-45bf-4770-8802-200ccb1732f9\") " pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:23 crc kubenswrapper[5014]: I1006 21:44:23.093960 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab6482c9-45bf-4770-8802-200ccb1732f9-catalog-content\") pod \"certified-operators-jjp88\" (UID: \"ab6482c9-45bf-4770-8802-200ccb1732f9\") " pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:23 crc kubenswrapper[5014]: I1006 21:44:23.195970 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcqtq\" (UniqueName: \"kubernetes.io/projected/ab6482c9-45bf-4770-8802-200ccb1732f9-kube-api-access-gcqtq\") pod \"certified-operators-jjp88\" (UID: \"ab6482c9-45bf-4770-8802-200ccb1732f9\") " pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:23 crc kubenswrapper[5014]: I1006 21:44:23.196035 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab6482c9-45bf-4770-8802-200ccb1732f9-utilities\") pod \"certified-operators-jjp88\" (UID: \"ab6482c9-45bf-4770-8802-200ccb1732f9\") " pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:23 crc kubenswrapper[5014]: I1006 
21:44:23.196056 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab6482c9-45bf-4770-8802-200ccb1732f9-catalog-content\") pod \"certified-operators-jjp88\" (UID: \"ab6482c9-45bf-4770-8802-200ccb1732f9\") " pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:23 crc kubenswrapper[5014]: I1006 21:44:23.196982 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab6482c9-45bf-4770-8802-200ccb1732f9-utilities\") pod \"certified-operators-jjp88\" (UID: \"ab6482c9-45bf-4770-8802-200ccb1732f9\") " pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:23 crc kubenswrapper[5014]: I1006 21:44:23.197087 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab6482c9-45bf-4770-8802-200ccb1732f9-catalog-content\") pod \"certified-operators-jjp88\" (UID: \"ab6482c9-45bf-4770-8802-200ccb1732f9\") " pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:23 crc kubenswrapper[5014]: I1006 21:44:23.233017 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcqtq\" (UniqueName: \"kubernetes.io/projected/ab6482c9-45bf-4770-8802-200ccb1732f9-kube-api-access-gcqtq\") pod \"certified-operators-jjp88\" (UID: \"ab6482c9-45bf-4770-8802-200ccb1732f9\") " pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:23 crc kubenswrapper[5014]: I1006 21:44:23.299663 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:23 crc kubenswrapper[5014]: I1006 21:44:23.927955 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jjp88"] Oct 06 21:44:24 crc kubenswrapper[5014]: I1006 21:44:24.116009 5014 generic.go:334] "Generic (PLEG): container finished" podID="ab6482c9-45bf-4770-8802-200ccb1732f9" containerID="561657a1455a533cbb9ac3c8acbe15182b2ef1d848e88474d9c1ac28dc3f9345" exitCode=0 Oct 06 21:44:24 crc kubenswrapper[5014]: I1006 21:44:24.116071 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjp88" event={"ID":"ab6482c9-45bf-4770-8802-200ccb1732f9","Type":"ContainerDied","Data":"561657a1455a533cbb9ac3c8acbe15182b2ef1d848e88474d9c1ac28dc3f9345"} Oct 06 21:44:24 crc kubenswrapper[5014]: I1006 21:44:24.116098 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjp88" event={"ID":"ab6482c9-45bf-4770-8802-200ccb1732f9","Type":"ContainerStarted","Data":"ba05ce52e15e19a3ed6324001c8fc3cb62331e95a65aa0afb586257bbea8b7b4"} Oct 06 21:44:25 crc kubenswrapper[5014]: I1006 21:44:25.125150 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjp88" event={"ID":"ab6482c9-45bf-4770-8802-200ccb1732f9","Type":"ContainerStarted","Data":"63b5ee38b78511eab953ca342b1ef84a5ea026f235a2dbc44aa9874fea702a6a"} Oct 06 21:44:25 crc kubenswrapper[5014]: I1006 21:44:25.545841 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-q4g5k"] Oct 06 21:44:25 crc kubenswrapper[5014]: I1006 21:44:25.546248 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-q4g5k" podUID="7a5d0729-830a-41aa-bcb6-392abe7cbe14" containerName="registry-server" 
containerID="cri-o://4a77bb2f1614c1b425e262c903e1e97ca1ad63079b2200dbb6645b30c4284a6f" gracePeriod=2 Oct 06 21:44:25 crc kubenswrapper[5014]: I1006 21:44:25.948166 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.135384 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a5d0729-830a-41aa-bcb6-392abe7cbe14-catalog-content\") pod \"7a5d0729-830a-41aa-bcb6-392abe7cbe14\" (UID: \"7a5d0729-830a-41aa-bcb6-392abe7cbe14\") " Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.135441 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7q64z\" (UniqueName: \"kubernetes.io/projected/7a5d0729-830a-41aa-bcb6-392abe7cbe14-kube-api-access-7q64z\") pod \"7a5d0729-830a-41aa-bcb6-392abe7cbe14\" (UID: \"7a5d0729-830a-41aa-bcb6-392abe7cbe14\") " Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.135553 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a5d0729-830a-41aa-bcb6-392abe7cbe14-utilities\") pod \"7a5d0729-830a-41aa-bcb6-392abe7cbe14\" (UID: \"7a5d0729-830a-41aa-bcb6-392abe7cbe14\") " Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.136579 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a5d0729-830a-41aa-bcb6-392abe7cbe14-utilities" (OuterVolumeSpecName: "utilities") pod "7a5d0729-830a-41aa-bcb6-392abe7cbe14" (UID: "7a5d0729-830a-41aa-bcb6-392abe7cbe14"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.144982 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a5d0729-830a-41aa-bcb6-392abe7cbe14-kube-api-access-7q64z" (OuterVolumeSpecName: "kube-api-access-7q64z") pod "7a5d0729-830a-41aa-bcb6-392abe7cbe14" (UID: "7a5d0729-830a-41aa-bcb6-392abe7cbe14"). InnerVolumeSpecName "kube-api-access-7q64z". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.147863 5014 generic.go:334] "Generic (PLEG): container finished" podID="7a5d0729-830a-41aa-bcb6-392abe7cbe14" containerID="4a77bb2f1614c1b425e262c903e1e97ca1ad63079b2200dbb6645b30c4284a6f" exitCode=0 Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.148145 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q4g5k" event={"ID":"7a5d0729-830a-41aa-bcb6-392abe7cbe14","Type":"ContainerDied","Data":"4a77bb2f1614c1b425e262c903e1e97ca1ad63079b2200dbb6645b30c4284a6f"} Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.148200 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q4g5k" event={"ID":"7a5d0729-830a-41aa-bcb6-392abe7cbe14","Type":"ContainerDied","Data":"fa4b7970281f776f089a80c3d44209bfa9317c46e0d92385f5668aef2c39a7f3"} Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.148208 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q4g5k" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.148222 5014 scope.go:117] "RemoveContainer" containerID="4a77bb2f1614c1b425e262c903e1e97ca1ad63079b2200dbb6645b30c4284a6f" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.156204 5014 generic.go:334] "Generic (PLEG): container finished" podID="ab6482c9-45bf-4770-8802-200ccb1732f9" containerID="63b5ee38b78511eab953ca342b1ef84a5ea026f235a2dbc44aa9874fea702a6a" exitCode=0 Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.156249 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjp88" event={"ID":"ab6482c9-45bf-4770-8802-200ccb1732f9","Type":"ContainerDied","Data":"63b5ee38b78511eab953ca342b1ef84a5ea026f235a2dbc44aa9874fea702a6a"} Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.156472 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a5d0729-830a-41aa-bcb6-392abe7cbe14-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7a5d0729-830a-41aa-bcb6-392abe7cbe14" (UID: "7a5d0729-830a-41aa-bcb6-392abe7cbe14"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.189845 5014 scope.go:117] "RemoveContainer" containerID="79c45bd87fdc6c37bf08aa9672d6f442e06b25e75ed4a37f1770a02e5d2b0a70" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.207998 5014 scope.go:117] "RemoveContainer" containerID="fc5eb58e910f668dba02f21eba7c75590d1e2818583e127862aa7e99ac341afd" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.229209 5014 scope.go:117] "RemoveContainer" containerID="4a77bb2f1614c1b425e262c903e1e97ca1ad63079b2200dbb6645b30c4284a6f" Oct 06 21:44:26 crc kubenswrapper[5014]: E1006 21:44:26.229788 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a77bb2f1614c1b425e262c903e1e97ca1ad63079b2200dbb6645b30c4284a6f\": container with ID starting with 4a77bb2f1614c1b425e262c903e1e97ca1ad63079b2200dbb6645b30c4284a6f not found: ID does not exist" containerID="4a77bb2f1614c1b425e262c903e1e97ca1ad63079b2200dbb6645b30c4284a6f" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.229866 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a77bb2f1614c1b425e262c903e1e97ca1ad63079b2200dbb6645b30c4284a6f"} err="failed to get container status \"4a77bb2f1614c1b425e262c903e1e97ca1ad63079b2200dbb6645b30c4284a6f\": rpc error: code = NotFound desc = could not find container \"4a77bb2f1614c1b425e262c903e1e97ca1ad63079b2200dbb6645b30c4284a6f\": container with ID starting with 4a77bb2f1614c1b425e262c903e1e97ca1ad63079b2200dbb6645b30c4284a6f not found: ID does not exist" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.229915 5014 scope.go:117] "RemoveContainer" containerID="79c45bd87fdc6c37bf08aa9672d6f442e06b25e75ed4a37f1770a02e5d2b0a70" Oct 06 21:44:26 crc kubenswrapper[5014]: E1006 21:44:26.230540 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79c45bd87fdc6c37bf08aa9672d6f442e06b25e75ed4a37f1770a02e5d2b0a70\": container with ID starting with 79c45bd87fdc6c37bf08aa9672d6f442e06b25e75ed4a37f1770a02e5d2b0a70 not found: ID does not exist" containerID="79c45bd87fdc6c37bf08aa9672d6f442e06b25e75ed4a37f1770a02e5d2b0a70" Oct 06 21:44:26 crc 
kubenswrapper[5014]: I1006 21:44:26.230583 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79c45bd87fdc6c37bf08aa9672d6f442e06b25e75ed4a37f1770a02e5d2b0a70"} err="failed to get container status \"79c45bd87fdc6c37bf08aa9672d6f442e06b25e75ed4a37f1770a02e5d2b0a70\": rpc error: code = NotFound desc = could not find container \"79c45bd87fdc6c37bf08aa9672d6f442e06b25e75ed4a37f1770a02e5d2b0a70\": container with ID starting with 79c45bd87fdc6c37bf08aa9672d6f442e06b25e75ed4a37f1770a02e5d2b0a70 not found: ID does not exist" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.230603 5014 scope.go:117] "RemoveContainer" containerID="fc5eb58e910f668dba02f21eba7c75590d1e2818583e127862aa7e99ac341afd" Oct 06 21:44:26 crc kubenswrapper[5014]: E1006 21:44:26.230918 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc5eb58e910f668dba02f21eba7c75590d1e2818583e127862aa7e99ac341afd\": container with ID starting with fc5eb58e910f668dba02f21eba7c75590d1e2818583e127862aa7e99ac341afd not found: ID does not exist" containerID="fc5eb58e910f668dba02f21eba7c75590d1e2818583e127862aa7e99ac341afd" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.230961 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc5eb58e910f668dba02f21eba7c75590d1e2818583e127862aa7e99ac341afd"} err="failed to get container status \"fc5eb58e910f668dba02f21eba7c75590d1e2818583e127862aa7e99ac341afd\": rpc error: code = NotFound desc = could not find container \"fc5eb58e910f668dba02f21eba7c75590d1e2818583e127862aa7e99ac341afd\": container with ID starting with fc5eb58e910f668dba02f21eba7c75590d1e2818583e127862aa7e99ac341afd not found: ID does not exist" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.237926 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7a5d0729-830a-41aa-bcb6-392abe7cbe14-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.237963 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7q64z\" (UniqueName: \"kubernetes.io/projected/7a5d0729-830a-41aa-bcb6-392abe7cbe14-kube-api-access-7q64z\") on node \"crc\" DevicePath \"\"" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.237979 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7a5d0729-830a-41aa-bcb6-392abe7cbe14-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.487730 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-q4g5k"] Oct 06 21:44:26 crc kubenswrapper[5014]: I1006 21:44:26.493113 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-q4g5k"] Oct 06 21:44:27 crc kubenswrapper[5014]: I1006 21:44:27.150757 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-6d84c4b8d5-p9mhq" Oct 06 21:44:27 crc kubenswrapper[5014]: I1006 21:44:27.166187 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjp88" event={"ID":"ab6482c9-45bf-4770-8802-200ccb1732f9","Type":"ContainerStarted","Data":"922c24e10a8af612a482e1cd544d21e61b03f12a420d645e1e7edcb4e08f9e5e"} Oct 06 21:44:27 crc kubenswrapper[5014]: I1006 
21:44:27.492471 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a5d0729-830a-41aa-bcb6-392abe7cbe14" path="/var/lib/kubelet/pods/7a5d0729-830a-41aa-bcb6-392abe7cbe14/volumes" Oct 06 21:44:33 crc kubenswrapper[5014]: I1006 21:44:33.300916 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:33 crc kubenswrapper[5014]: I1006 21:44:33.301875 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:33 crc kubenswrapper[5014]: I1006 21:44:33.378306 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:33 crc kubenswrapper[5014]: I1006 21:44:33.413669 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jjp88" podStartSLOduration=8.865754238 podStartE2EDuration="11.413644819s" podCreationTimestamp="2025-10-06 21:44:22 +0000 UTC" firstStartedPulling="2025-10-06 21:44:24.11753457 +0000 UTC m=+809.410571304" lastFinishedPulling="2025-10-06 21:44:26.665425151 +0000 UTC m=+811.958461885" observedRunningTime="2025-10-06 21:44:27.218020399 +0000 UTC m=+812.511057133" watchObservedRunningTime="2025-10-06 21:44:33.413644819 +0000 UTC m=+818.706681593" Oct 06 21:44:34 crc kubenswrapper[5014]: I1006 21:44:34.280530 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:34 crc kubenswrapper[5014]: I1006 21:44:34.342430 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jjp88"] Oct 06 21:44:36 crc kubenswrapper[5014]: I1006 21:44:36.228984 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jjp88" podUID="ab6482c9-45bf-4770-8802-200ccb1732f9" containerName="registry-server" containerID="cri-o://922c24e10a8af612a482e1cd544d21e61b03f12a420d645e1e7edcb4e08f9e5e" gracePeriod=2 Oct 06 21:44:36 crc kubenswrapper[5014]: I1006 21:44:36.751745 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:36 crc kubenswrapper[5014]: I1006 21:44:36.892418 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab6482c9-45bf-4770-8802-200ccb1732f9-catalog-content\") pod \"ab6482c9-45bf-4770-8802-200ccb1732f9\" (UID: \"ab6482c9-45bf-4770-8802-200ccb1732f9\") " Oct 06 21:44:36 crc kubenswrapper[5014]: I1006 21:44:36.892544 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gcqtq\" (UniqueName: \"kubernetes.io/projected/ab6482c9-45bf-4770-8802-200ccb1732f9-kube-api-access-gcqtq\") pod \"ab6482c9-45bf-4770-8802-200ccb1732f9\" (UID: \"ab6482c9-45bf-4770-8802-200ccb1732f9\") " Oct 06 21:44:36 crc kubenswrapper[5014]: I1006 21:44:36.892730 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab6482c9-45bf-4770-8802-200ccb1732f9-utilities\") pod \"ab6482c9-45bf-4770-8802-200ccb1732f9\" (UID: \"ab6482c9-45bf-4770-8802-200ccb1732f9\") " Oct 06 21:44:36 crc kubenswrapper[5014]: I1006 21:44:36.894805 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab6482c9-45bf-4770-8802-200ccb1732f9-utilities" (OuterVolumeSpecName: "utilities") pod "ab6482c9-45bf-4770-8802-200ccb1732f9" (UID: "ab6482c9-45bf-4770-8802-200ccb1732f9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:44:36 crc kubenswrapper[5014]: I1006 21:44:36.904825 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab6482c9-45bf-4770-8802-200ccb1732f9-kube-api-access-gcqtq" (OuterVolumeSpecName: "kube-api-access-gcqtq") pod "ab6482c9-45bf-4770-8802-200ccb1732f9" (UID: "ab6482c9-45bf-4770-8802-200ccb1732f9"). InnerVolumeSpecName "kube-api-access-gcqtq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:44:36 crc kubenswrapper[5014]: I1006 21:44:36.978731 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab6482c9-45bf-4770-8802-200ccb1732f9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ab6482c9-45bf-4770-8802-200ccb1732f9" (UID: "ab6482c9-45bf-4770-8802-200ccb1732f9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:44:36 crc kubenswrapper[5014]: I1006 21:44:36.995017 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab6482c9-45bf-4770-8802-200ccb1732f9-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 21:44:36 crc kubenswrapper[5014]: I1006 21:44:36.995503 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab6482c9-45bf-4770-8802-200ccb1732f9-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 21:44:36 crc kubenswrapper[5014]: I1006 21:44:36.995839 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gcqtq\" (UniqueName: \"kubernetes.io/projected/ab6482c9-45bf-4770-8802-200ccb1732f9-kube-api-access-gcqtq\") on node \"crc\" DevicePath \"\"" Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.241051 5014 generic.go:334] "Generic (PLEG): container finished" podID="ab6482c9-45bf-4770-8802-200ccb1732f9" containerID="922c24e10a8af612a482e1cd544d21e61b03f12a420d645e1e7edcb4e08f9e5e" exitCode=0 Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.241131 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jjp88" Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.241166 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjp88" event={"ID":"ab6482c9-45bf-4770-8802-200ccb1732f9","Type":"ContainerDied","Data":"922c24e10a8af612a482e1cd544d21e61b03f12a420d645e1e7edcb4e08f9e5e"} Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.241276 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jjp88" event={"ID":"ab6482c9-45bf-4770-8802-200ccb1732f9","Type":"ContainerDied","Data":"ba05ce52e15e19a3ed6324001c8fc3cb62331e95a65aa0afb586257bbea8b7b4"} Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.241310 5014 scope.go:117] "RemoveContainer" containerID="922c24e10a8af612a482e1cd544d21e61b03f12a420d645e1e7edcb4e08f9e5e" Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.277390 5014 scope.go:117] "RemoveContainer" containerID="63b5ee38b78511eab953ca342b1ef84a5ea026f235a2dbc44aa9874fea702a6a" Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.278518 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jjp88"] Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.286703 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jjp88"] Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.303878 5014 scope.go:117] "RemoveContainer" containerID="561657a1455a533cbb9ac3c8acbe15182b2ef1d848e88474d9c1ac28dc3f9345" Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.324823 5014 scope.go:117] "RemoveContainer" containerID="922c24e10a8af612a482e1cd544d21e61b03f12a420d645e1e7edcb4e08f9e5e" Oct 06 21:44:37 crc kubenswrapper[5014]: E1006 21:44:37.325404 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"922c24e10a8af612a482e1cd544d21e61b03f12a420d645e1e7edcb4e08f9e5e\": container with ID starting with 922c24e10a8af612a482e1cd544d21e61b03f12a420d645e1e7edcb4e08f9e5e not found: ID does not exist" containerID="922c24e10a8af612a482e1cd544d21e61b03f12a420d645e1e7edcb4e08f9e5e" Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.325465 
5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"922c24e10a8af612a482e1cd544d21e61b03f12a420d645e1e7edcb4e08f9e5e"} err="failed to get container status \"922c24e10a8af612a482e1cd544d21e61b03f12a420d645e1e7edcb4e08f9e5e\": rpc error: code = NotFound desc = could not find container \"922c24e10a8af612a482e1cd544d21e61b03f12a420d645e1e7edcb4e08f9e5e\": container with ID starting with 922c24e10a8af612a482e1cd544d21e61b03f12a420d645e1e7edcb4e08f9e5e not found: ID does not exist" Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.325492 5014 scope.go:117] "RemoveContainer" containerID="63b5ee38b78511eab953ca342b1ef84a5ea026f235a2dbc44aa9874fea702a6a" Oct 06 21:44:37 crc kubenswrapper[5014]: E1006 21:44:37.325807 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63b5ee38b78511eab953ca342b1ef84a5ea026f235a2dbc44aa9874fea702a6a\": container with ID starting with 63b5ee38b78511eab953ca342b1ef84a5ea026f235a2dbc44aa9874fea702a6a not found: ID does not exist" containerID="63b5ee38b78511eab953ca342b1ef84a5ea026f235a2dbc44aa9874fea702a6a" Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.325833 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63b5ee38b78511eab953ca342b1ef84a5ea026f235a2dbc44aa9874fea702a6a"} err="failed to get container status \"63b5ee38b78511eab953ca342b1ef84a5ea026f235a2dbc44aa9874fea702a6a\": rpc error: code = NotFound desc = could not find container \"63b5ee38b78511eab953ca342b1ef84a5ea026f235a2dbc44aa9874fea702a6a\": container with ID starting with 63b5ee38b78511eab953ca342b1ef84a5ea026f235a2dbc44aa9874fea702a6a not found: ID does not exist" Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.325863 5014 scope.go:117] "RemoveContainer" containerID="561657a1455a533cbb9ac3c8acbe15182b2ef1d848e88474d9c1ac28dc3f9345" Oct 06 21:44:37 crc kubenswrapper[5014]: E1006 21:44:37.326079 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"561657a1455a533cbb9ac3c8acbe15182b2ef1d848e88474d9c1ac28dc3f9345\": container with ID starting with 561657a1455a533cbb9ac3c8acbe15182b2ef1d848e88474d9c1ac28dc3f9345 not found: ID does not exist" containerID="561657a1455a533cbb9ac3c8acbe15182b2ef1d848e88474d9c1ac28dc3f9345" Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.326104 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"561657a1455a533cbb9ac3c8acbe15182b2ef1d848e88474d9c1ac28dc3f9345"} err="failed to get container status \"561657a1455a533cbb9ac3c8acbe15182b2ef1d848e88474d9c1ac28dc3f9345\": rpc error: code = NotFound desc = could not find container \"561657a1455a533cbb9ac3c8acbe15182b2ef1d848e88474d9c1ac28dc3f9345\": container with ID starting with 561657a1455a533cbb9ac3c8acbe15182b2ef1d848e88474d9c1ac28dc3f9345 not found: ID does not exist" Oct 06 21:44:37 crc kubenswrapper[5014]: I1006 21:44:37.505352 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab6482c9-45bf-4770-8802-200ccb1732f9" path="/var/lib/kubelet/pods/ab6482c9-45bf-4770-8802-200ccb1732f9/volumes" Oct 06 21:44:46 crc kubenswrapper[5014]: I1006 21:44:46.667336 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6d98768845-hrhnz" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.532811 5014 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-jvrkm"] Oct 06 21:44:47 crc kubenswrapper[5014]: E1006 21:44:47.533352 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab6482c9-45bf-4770-8802-200ccb1732f9" containerName="extract-content" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.533365 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab6482c9-45bf-4770-8802-200ccb1732f9" containerName="extract-content" Oct 06 21:44:47 crc kubenswrapper[5014]: E1006 21:44:47.533381 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a5d0729-830a-41aa-bcb6-392abe7cbe14" containerName="extract-content" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.533388 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a5d0729-830a-41aa-bcb6-392abe7cbe14" containerName="extract-content" Oct 06 21:44:47 crc kubenswrapper[5014]: E1006 21:44:47.533400 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab6482c9-45bf-4770-8802-200ccb1732f9" containerName="extract-utilities" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.533407 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab6482c9-45bf-4770-8802-200ccb1732f9" containerName="extract-utilities" Oct 06 21:44:47 crc kubenswrapper[5014]: E1006 21:44:47.533418 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a5d0729-830a-41aa-bcb6-392abe7cbe14" containerName="extract-utilities" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.533424 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a5d0729-830a-41aa-bcb6-392abe7cbe14" containerName="extract-utilities" Oct 06 21:44:47 crc kubenswrapper[5014]: E1006 21:44:47.533437 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab6482c9-45bf-4770-8802-200ccb1732f9" containerName="registry-server" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.533443 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab6482c9-45bf-4770-8802-200ccb1732f9" containerName="registry-server" Oct 06 21:44:47 crc kubenswrapper[5014]: E1006 21:44:47.533452 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a5d0729-830a-41aa-bcb6-392abe7cbe14" containerName="registry-server" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.533457 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a5d0729-830a-41aa-bcb6-392abe7cbe14" containerName="registry-server" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.533569 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab6482c9-45bf-4770-8802-200ccb1732f9" containerName="registry-server" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.533583 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a5d0729-830a-41aa-bcb6-392abe7cbe14" containerName="registry-server" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.535463 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.536111 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl"] Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.536999 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.555480 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.555811 5014 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.555820 5014 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.558137 5014 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-c455m" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.577139 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl"] Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.622731 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-blzvr"] Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.629975 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-blzvr" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.633795 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.633815 5014 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-wjrk2" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.633846 5014 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.634552 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/6268ffa5-522f-435a-b01d-dec42e54da42-frr-conf\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.634589 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6268ffa5-522f-435a-b01d-dec42e54da42-metrics-certs\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.634611 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/6268ffa5-522f-435a-b01d-dec42e54da42-frr-startup\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.634654 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/54c48252-fba7-4ffa-9c64-a013e59660e1-cert\") pod \"frr-k8s-webhook-server-64bf5d555-5xzfl\" (UID: \"54c48252-fba7-4ffa-9c64-a013e59660e1\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.634680 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: 
\"kubernetes.io/empty-dir/6268ffa5-522f-435a-b01d-dec42e54da42-frr-sockets\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.634719 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/6268ffa5-522f-435a-b01d-dec42e54da42-reloader\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.634753 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/6268ffa5-522f-435a-b01d-dec42e54da42-metrics\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.634916 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntm8d\" (UniqueName: \"kubernetes.io/projected/6268ffa5-522f-435a-b01d-dec42e54da42-kube-api-access-ntm8d\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.635041 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pp252\" (UniqueName: \"kubernetes.io/projected/54c48252-fba7-4ffa-9c64-a013e59660e1-kube-api-access-pp252\") pod \"frr-k8s-webhook-server-64bf5d555-5xzfl\" (UID: \"54c48252-fba7-4ffa-9c64-a013e59660e1\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.636212 5014 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.650096 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-68d546b9d8-5tmn6"] Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.651388 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-68d546b9d8-5tmn6" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.655017 5014 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.665999 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-68d546b9d8-5tmn6"] Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.736155 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pp252\" (UniqueName: \"kubernetes.io/projected/54c48252-fba7-4ffa-9c64-a013e59660e1-kube-api-access-pp252\") pod \"frr-k8s-webhook-server-64bf5d555-5xzfl\" (UID: \"54c48252-fba7-4ffa-9c64-a013e59660e1\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.736246 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-metrics-certs\") pod \"speaker-blzvr\" (UID: \"b8e735e8-cf23-4db7-bc61-b7e2d5b245af\") " pod="metallb-system/speaker-blzvr" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.736277 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-memberlist\") pod \"speaker-blzvr\" (UID: \"b8e735e8-cf23-4db7-bc61-b7e2d5b245af\") " pod="metallb-system/speaker-blzvr" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.736309 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/6268ffa5-522f-435a-b01d-dec42e54da42-frr-conf\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.736327 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6268ffa5-522f-435a-b01d-dec42e54da42-metrics-certs\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.736351 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/99f234c2-01a1-49fe-8979-2b8bfaa3f08b-metrics-certs\") pod \"controller-68d546b9d8-5tmn6\" (UID: \"99f234c2-01a1-49fe-8979-2b8bfaa3f08b\") " pod="metallb-system/controller-68d546b9d8-5tmn6" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.736370 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/6268ffa5-522f-435a-b01d-dec42e54da42-frr-startup\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.736389 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w826z\" (UniqueName: \"kubernetes.io/projected/99f234c2-01a1-49fe-8979-2b8bfaa3f08b-kube-api-access-w826z\") pod \"controller-68d546b9d8-5tmn6\" (UID: \"99f234c2-01a1-49fe-8979-2b8bfaa3f08b\") " pod="metallb-system/controller-68d546b9d8-5tmn6" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 
21:44:47.736410 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/54c48252-fba7-4ffa-9c64-a013e59660e1-cert\") pod \"frr-k8s-webhook-server-64bf5d555-5xzfl\" (UID: \"54c48252-fba7-4ffa-9c64-a013e59660e1\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.736428 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/6268ffa5-522f-435a-b01d-dec42e54da42-frr-sockets\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.736460 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/6268ffa5-522f-435a-b01d-dec42e54da42-reloader\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.736477 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lj5mm\" (UniqueName: \"kubernetes.io/projected/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-kube-api-access-lj5mm\") pod \"speaker-blzvr\" (UID: \"b8e735e8-cf23-4db7-bc61-b7e2d5b245af\") " pod="metallb-system/speaker-blzvr" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.736497 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-metallb-excludel2\") pod \"speaker-blzvr\" (UID: \"b8e735e8-cf23-4db7-bc61-b7e2d5b245af\") " pod="metallb-system/speaker-blzvr" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.736515 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/6268ffa5-522f-435a-b01d-dec42e54da42-metrics\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.736536 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/99f234c2-01a1-49fe-8979-2b8bfaa3f08b-cert\") pod \"controller-68d546b9d8-5tmn6\" (UID: \"99f234c2-01a1-49fe-8979-2b8bfaa3f08b\") " pod="metallb-system/controller-68d546b9d8-5tmn6" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.736559 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntm8d\" (UniqueName: \"kubernetes.io/projected/6268ffa5-522f-435a-b01d-dec42e54da42-kube-api-access-ntm8d\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: E1006 21:44:47.736972 5014 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Oct 06 21:44:47 crc kubenswrapper[5014]: E1006 21:44:47.737069 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6268ffa5-522f-435a-b01d-dec42e54da42-metrics-certs podName:6268ffa5-522f-435a-b01d-dec42e54da42 nodeName:}" failed. No retries permitted until 2025-10-06 21:44:48.237042372 +0000 UTC m=+833.530079106 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6268ffa5-522f-435a-b01d-dec42e54da42-metrics-certs") pod "frr-k8s-jvrkm" (UID: "6268ffa5-522f-435a-b01d-dec42e54da42") : secret "frr-k8s-certs-secret" not found Oct 06 21:44:47 crc kubenswrapper[5014]: E1006 21:44:47.737123 5014 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Oct 06 21:44:47 crc kubenswrapper[5014]: E1006 21:44:47.737183 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54c48252-fba7-4ffa-9c64-a013e59660e1-cert podName:54c48252-fba7-4ffa-9c64-a013e59660e1 nodeName:}" failed. No retries permitted until 2025-10-06 21:44:48.237162416 +0000 UTC m=+833.530199150 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/54c48252-fba7-4ffa-9c64-a013e59660e1-cert") pod "frr-k8s-webhook-server-64bf5d555-5xzfl" (UID: "54c48252-fba7-4ffa-9c64-a013e59660e1") : secret "frr-k8s-webhook-server-cert" not found Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.737595 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/6268ffa5-522f-435a-b01d-dec42e54da42-frr-conf\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.737754 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/6268ffa5-522f-435a-b01d-dec42e54da42-frr-sockets\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.737917 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/6268ffa5-522f-435a-b01d-dec42e54da42-metrics\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.738017 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/6268ffa5-522f-435a-b01d-dec42e54da42-reloader\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.738265 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/6268ffa5-522f-435a-b01d-dec42e54da42-frr-startup\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.759183 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pp252\" (UniqueName: \"kubernetes.io/projected/54c48252-fba7-4ffa-9c64-a013e59660e1-kube-api-access-pp252\") pod \"frr-k8s-webhook-server-64bf5d555-5xzfl\" (UID: \"54c48252-fba7-4ffa-9c64-a013e59660e1\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.762163 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntm8d\" (UniqueName: \"kubernetes.io/projected/6268ffa5-522f-435a-b01d-dec42e54da42-kube-api-access-ntm8d\") pod \"frr-k8s-jvrkm\" (UID: 
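[Annotation] The two MountVolume.SetUp failures above are an ordering race, not a fault: the pods were scheduled before the certificate secrets existed, so the kubelet parks each mount and retries after a delay that grows on repeated failure (durationBeforeRetry is 500ms here and reaches 1s for the memberlist volume below). A minimal Go sketch of that doubling delay, assuming an illustrative cap (the real pacing lives in the kubelet's nestedpendingoperations backoff; the constant below is an assumption, not kubelet's value):

package main

import (
	"fmt"
	"time"
)

// nextDelay doubles the retry delay up to a cap, matching the
// 500ms -> 1s progression visible in the log entries around here.
func nextDelay(d, limit time.Duration) time.Duration {
	if d *= 2; d > limit {
		return limit
	}
	return d
}

func main() {
	d := 500 * time.Millisecond
	for attempt := 1; attempt <= 5; attempt++ {
		fmt.Printf("attempt %d: wait %v\n", attempt, d)
		d = nextDelay(d, 2*time.Minute) // cap is illustrative only
	}
}

Once the missing secret appears (as it does at 21:44:48 below), the next scheduled retry succeeds and the pod proceeds.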
\"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.837973 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/99f234c2-01a1-49fe-8979-2b8bfaa3f08b-cert\") pod \"controller-68d546b9d8-5tmn6\" (UID: \"99f234c2-01a1-49fe-8979-2b8bfaa3f08b\") " pod="metallb-system/controller-68d546b9d8-5tmn6" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.838050 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-metrics-certs\") pod \"speaker-blzvr\" (UID: \"b8e735e8-cf23-4db7-bc61-b7e2d5b245af\") " pod="metallb-system/speaker-blzvr" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.838068 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-memberlist\") pod \"speaker-blzvr\" (UID: \"b8e735e8-cf23-4db7-bc61-b7e2d5b245af\") " pod="metallb-system/speaker-blzvr" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.838106 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/99f234c2-01a1-49fe-8979-2b8bfaa3f08b-metrics-certs\") pod \"controller-68d546b9d8-5tmn6\" (UID: \"99f234c2-01a1-49fe-8979-2b8bfaa3f08b\") " pod="metallb-system/controller-68d546b9d8-5tmn6" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.838130 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w826z\" (UniqueName: \"kubernetes.io/projected/99f234c2-01a1-49fe-8979-2b8bfaa3f08b-kube-api-access-w826z\") pod \"controller-68d546b9d8-5tmn6\" (UID: \"99f234c2-01a1-49fe-8979-2b8bfaa3f08b\") " pod="metallb-system/controller-68d546b9d8-5tmn6" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.838194 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lj5mm\" (UniqueName: \"kubernetes.io/projected/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-kube-api-access-lj5mm\") pod \"speaker-blzvr\" (UID: \"b8e735e8-cf23-4db7-bc61-b7e2d5b245af\") " pod="metallb-system/speaker-blzvr" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.838220 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-metallb-excludel2\") pod \"speaker-blzvr\" (UID: \"b8e735e8-cf23-4db7-bc61-b7e2d5b245af\") " pod="metallb-system/speaker-blzvr" Oct 06 21:44:47 crc kubenswrapper[5014]: E1006 21:44:47.838219 5014 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Oct 06 21:44:47 crc kubenswrapper[5014]: E1006 21:44:47.838308 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-metrics-certs podName:b8e735e8-cf23-4db7-bc61-b7e2d5b245af nodeName:}" failed. No retries permitted until 2025-10-06 21:44:48.338285193 +0000 UTC m=+833.631321927 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-metrics-certs") pod "speaker-blzvr" (UID: "b8e735e8-cf23-4db7-bc61-b7e2d5b245af") : secret "speaker-certs-secret" not found Oct 06 21:44:47 crc kubenswrapper[5014]: E1006 21:44:47.838232 5014 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Oct 06 21:44:47 crc kubenswrapper[5014]: E1006 21:44:47.838438 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-memberlist podName:b8e735e8-cf23-4db7-bc61-b7e2d5b245af nodeName:}" failed. No retries permitted until 2025-10-06 21:44:48.338431528 +0000 UTC m=+833.631468262 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-memberlist") pod "speaker-blzvr" (UID: "b8e735e8-cf23-4db7-bc61-b7e2d5b245af") : secret "metallb-memberlist" not found Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.839417 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-metallb-excludel2\") pod \"speaker-blzvr\" (UID: \"b8e735e8-cf23-4db7-bc61-b7e2d5b245af\") " pod="metallb-system/speaker-blzvr" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.842775 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/99f234c2-01a1-49fe-8979-2b8bfaa3f08b-cert\") pod \"controller-68d546b9d8-5tmn6\" (UID: \"99f234c2-01a1-49fe-8979-2b8bfaa3f08b\") " pod="metallb-system/controller-68d546b9d8-5tmn6" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.842994 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/99f234c2-01a1-49fe-8979-2b8bfaa3f08b-metrics-certs\") pod \"controller-68d546b9d8-5tmn6\" (UID: \"99f234c2-01a1-49fe-8979-2b8bfaa3f08b\") " pod="metallb-system/controller-68d546b9d8-5tmn6" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.855038 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w826z\" (UniqueName: \"kubernetes.io/projected/99f234c2-01a1-49fe-8979-2b8bfaa3f08b-kube-api-access-w826z\") pod \"controller-68d546b9d8-5tmn6\" (UID: \"99f234c2-01a1-49fe-8979-2b8bfaa3f08b\") " pod="metallb-system/controller-68d546b9d8-5tmn6" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.859246 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lj5mm\" (UniqueName: \"kubernetes.io/projected/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-kube-api-access-lj5mm\") pod \"speaker-blzvr\" (UID: \"b8e735e8-cf23-4db7-bc61-b7e2d5b245af\") " pod="metallb-system/speaker-blzvr" Oct 06 21:44:47 crc kubenswrapper[5014]: I1006 21:44:47.970594 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-68d546b9d8-5tmn6" Oct 06 21:44:48 crc kubenswrapper[5014]: I1006 21:44:48.245810 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6268ffa5-522f-435a-b01d-dec42e54da42-metrics-certs\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:48 crc kubenswrapper[5014]: I1006 21:44:48.245893 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/54c48252-fba7-4ffa-9c64-a013e59660e1-cert\") pod \"frr-k8s-webhook-server-64bf5d555-5xzfl\" (UID: \"54c48252-fba7-4ffa-9c64-a013e59660e1\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl" Oct 06 21:44:48 crc kubenswrapper[5014]: I1006 21:44:48.250841 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6268ffa5-522f-435a-b01d-dec42e54da42-metrics-certs\") pod \"frr-k8s-jvrkm\" (UID: \"6268ffa5-522f-435a-b01d-dec42e54da42\") " pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:48 crc kubenswrapper[5014]: I1006 21:44:48.251687 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/54c48252-fba7-4ffa-9c64-a013e59660e1-cert\") pod \"frr-k8s-webhook-server-64bf5d555-5xzfl\" (UID: \"54c48252-fba7-4ffa-9c64-a013e59660e1\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl" Oct 06 21:44:48 crc kubenswrapper[5014]: I1006 21:44:48.306204 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-68d546b9d8-5tmn6"] Oct 06 21:44:48 crc kubenswrapper[5014]: I1006 21:44:48.323894 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-68d546b9d8-5tmn6" event={"ID":"99f234c2-01a1-49fe-8979-2b8bfaa3f08b","Type":"ContainerStarted","Data":"18d88ee53ebbc928d673d707dd0679bd10733bd428963aa7c40ccea8afe2dd2d"} Oct 06 21:44:48 crc kubenswrapper[5014]: I1006 21:44:48.347082 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-metrics-certs\") pod \"speaker-blzvr\" (UID: \"b8e735e8-cf23-4db7-bc61-b7e2d5b245af\") " pod="metallb-system/speaker-blzvr" Oct 06 21:44:48 crc kubenswrapper[5014]: I1006 21:44:48.347155 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-memberlist\") pod \"speaker-blzvr\" (UID: \"b8e735e8-cf23-4db7-bc61-b7e2d5b245af\") " pod="metallb-system/speaker-blzvr" Oct 06 21:44:48 crc kubenswrapper[5014]: E1006 21:44:48.347415 5014 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Oct 06 21:44:48 crc kubenswrapper[5014]: E1006 21:44:48.347497 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-memberlist podName:b8e735e8-cf23-4db7-bc61-b7e2d5b245af nodeName:}" failed. No retries permitted until 2025-10-06 21:44:49.347474663 +0000 UTC m=+834.640511407 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-memberlist") pod "speaker-blzvr" (UID: "b8e735e8-cf23-4db7-bc61-b7e2d5b245af") : secret "metallb-memberlist" not found Oct 06 21:44:48 crc kubenswrapper[5014]: I1006 21:44:48.355002 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-metrics-certs\") pod \"speaker-blzvr\" (UID: \"b8e735e8-cf23-4db7-bc61-b7e2d5b245af\") " pod="metallb-system/speaker-blzvr" Oct 06 21:44:48 crc kubenswrapper[5014]: I1006 21:44:48.486326 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:44:48 crc kubenswrapper[5014]: I1006 21:44:48.492859 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl" Oct 06 21:44:49 crc kubenswrapper[5014]: I1006 21:44:49.003211 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl"] Oct 06 21:44:49 crc kubenswrapper[5014]: W1006 21:44:49.012491 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54c48252_fba7_4ffa_9c64_a013e59660e1.slice/crio-6c5c8ab91a5baf3903ed4fec56674873282a87c38ec531e062d7c3afed7d73c6 WatchSource:0}: Error finding container 6c5c8ab91a5baf3903ed4fec56674873282a87c38ec531e062d7c3afed7d73c6: Status 404 returned error can't find the container with id 6c5c8ab91a5baf3903ed4fec56674873282a87c38ec531e062d7c3afed7d73c6 Oct 06 21:44:49 crc kubenswrapper[5014]: I1006 21:44:49.332591 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jvrkm" event={"ID":"6268ffa5-522f-435a-b01d-dec42e54da42","Type":"ContainerStarted","Data":"36663ccdde01a69d380460ee12b2a5338e3c575a11460670dc7417f283e04e4e"} Oct 06 21:44:49 crc kubenswrapper[5014]: I1006 21:44:49.334920 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-68d546b9d8-5tmn6" event={"ID":"99f234c2-01a1-49fe-8979-2b8bfaa3f08b","Type":"ContainerStarted","Data":"07be7077b2a310ecdd9edf5c731541afa1509325353871699387dfb35a1d238d"} Oct 06 21:44:49 crc kubenswrapper[5014]: I1006 21:44:49.334964 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-68d546b9d8-5tmn6" event={"ID":"99f234c2-01a1-49fe-8979-2b8bfaa3f08b","Type":"ContainerStarted","Data":"97203bdb9dfcab8c45969d4211d484455615427b1005829eb14391c16e30e0d8"} Oct 06 21:44:49 crc kubenswrapper[5014]: I1006 21:44:49.335317 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-68d546b9d8-5tmn6" Oct 06 21:44:49 crc kubenswrapper[5014]: I1006 21:44:49.336765 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl" event={"ID":"54c48252-fba7-4ffa-9c64-a013e59660e1","Type":"ContainerStarted","Data":"6c5c8ab91a5baf3903ed4fec56674873282a87c38ec531e062d7c3afed7d73c6"} Oct 06 21:44:49 crc kubenswrapper[5014]: I1006 21:44:49.358188 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-68d546b9d8-5tmn6" podStartSLOduration=2.358155711 podStartE2EDuration="2.358155711s" podCreationTimestamp="2025-10-06 21:44:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
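[Annotation] The "Observed pod startup duration" entry above is simple arithmetic: podStartE2EDuration is the observed running time minus podCreationTimestamp, and podStartSLOduration additionally excludes image-pull time, which is why the two coincide here, where firstStartedPulling is the zero time (no pull was needed). A quick recomputation in Go from the timestamps copied out of the entry (the layout string is standard Go time formatting):

package main

import (
	"fmt"
	"time"
)

// Recompute the controller pod's startup duration from the log entry:
// watchObservedRunningTime - podCreationTimestamp = 2.358155711s,
// matching both podStartSLOduration and podStartE2EDuration above.
func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, err := time.Parse(layout, "2025-10-06 21:44:47 +0000 UTC")
	if err != nil {
		panic(err)
	}
	running, err := time.Parse(layout, "2025-10-06 21:44:49.358155711 +0000 UTC")
	if err != nil {
		panic(err)
	}
	fmt.Println(running.Sub(created)) // prints 2.358155711s
}

For the webhook pod further down, the SLO figure (~2.72s) is smaller than the E2E figure (~9.46s) because the ~6.74s between firstStartedPulling and lastFinishedPulling is excluded.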
observedRunningTime="2025-10-06 21:44:49.355562059 +0000 UTC m=+834.648598843" watchObservedRunningTime="2025-10-06 21:44:49.358155711 +0000 UTC m=+834.651192485" Oct 06 21:44:49 crc kubenswrapper[5014]: I1006 21:44:49.384851 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-memberlist\") pod \"speaker-blzvr\" (UID: \"b8e735e8-cf23-4db7-bc61-b7e2d5b245af\") " pod="metallb-system/speaker-blzvr" Oct 06 21:44:49 crc kubenswrapper[5014]: I1006 21:44:49.390876 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/b8e735e8-cf23-4db7-bc61-b7e2d5b245af-memberlist\") pod \"speaker-blzvr\" (UID: \"b8e735e8-cf23-4db7-bc61-b7e2d5b245af\") " pod="metallb-system/speaker-blzvr" Oct 06 21:44:49 crc kubenswrapper[5014]: I1006 21:44:49.451540 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-blzvr" Oct 06 21:44:49 crc kubenswrapper[5014]: W1006 21:44:49.489105 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8e735e8_cf23_4db7_bc61_b7e2d5b245af.slice/crio-d3a853c785ead1fd26486458ea2a9cf8c3b4983d35f063a4e35b6b9d9540adcd WatchSource:0}: Error finding container d3a853c785ead1fd26486458ea2a9cf8c3b4983d35f063a4e35b6b9d9540adcd: Status 404 returned error can't find the container with id d3a853c785ead1fd26486458ea2a9cf8c3b4983d35f063a4e35b6b9d9540adcd Oct 06 21:44:50 crc kubenswrapper[5014]: I1006 21:44:50.346648 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-blzvr" event={"ID":"b8e735e8-cf23-4db7-bc61-b7e2d5b245af","Type":"ContainerStarted","Data":"1150a8d0bb9b992b78feb8b524f3e9b3f54c07fffdee25d603828f9e562e03a9"} Oct 06 21:44:50 crc kubenswrapper[5014]: I1006 21:44:50.347108 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-blzvr" event={"ID":"b8e735e8-cf23-4db7-bc61-b7e2d5b245af","Type":"ContainerStarted","Data":"83e68d40ef5980ad7cdf5a2197ca4ee26b7a4b2549d9d4aea3939d735a2c69ac"} Oct 06 21:44:50 crc kubenswrapper[5014]: I1006 21:44:50.347130 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-blzvr" event={"ID":"b8e735e8-cf23-4db7-bc61-b7e2d5b245af","Type":"ContainerStarted","Data":"d3a853c785ead1fd26486458ea2a9cf8c3b4983d35f063a4e35b6b9d9540adcd"} Oct 06 21:44:50 crc kubenswrapper[5014]: I1006 21:44:50.347351 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-blzvr" Oct 06 21:44:50 crc kubenswrapper[5014]: I1006 21:44:50.361967 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-blzvr" podStartSLOduration=3.361951161 podStartE2EDuration="3.361951161s" podCreationTimestamp="2025-10-06 21:44:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:44:50.360001829 +0000 UTC m=+835.653038563" watchObservedRunningTime="2025-10-06 21:44:50.361951161 +0000 UTC m=+835.654987885" Oct 06 21:44:56 crc kubenswrapper[5014]: I1006 21:44:56.390426 5014 generic.go:334] "Generic (PLEG): container finished" podID="6268ffa5-522f-435a-b01d-dec42e54da42" containerID="a534aad1b78ec50036c49f4545260f6987a2af393bf2114ce26416436eb75d74" exitCode=0 Oct 06 21:44:56 crc kubenswrapper[5014]: I1006 21:44:56.390523 5014 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jvrkm" event={"ID":"6268ffa5-522f-435a-b01d-dec42e54da42","Type":"ContainerDied","Data":"a534aad1b78ec50036c49f4545260f6987a2af393bf2114ce26416436eb75d74"} Oct 06 21:44:56 crc kubenswrapper[5014]: I1006 21:44:56.393139 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl" event={"ID":"54c48252-fba7-4ffa-9c64-a013e59660e1","Type":"ContainerStarted","Data":"2ca99434f89ef5e1af410f969a16faf8a99ca0a9431be253d175a84b734ff6e0"} Oct 06 21:44:56 crc kubenswrapper[5014]: I1006 21:44:56.393428 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl" Oct 06 21:44:56 crc kubenswrapper[5014]: I1006 21:44:56.461089 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl" podStartSLOduration=2.7232850490000002 podStartE2EDuration="9.461063268s" podCreationTimestamp="2025-10-06 21:44:47 +0000 UTC" firstStartedPulling="2025-10-06 21:44:49.015074886 +0000 UTC m=+834.308111650" lastFinishedPulling="2025-10-06 21:44:55.752853125 +0000 UTC m=+841.045889869" observedRunningTime="2025-10-06 21:44:56.459301083 +0000 UTC m=+841.752337857" watchObservedRunningTime="2025-10-06 21:44:56.461063268 +0000 UTC m=+841.754100012" Oct 06 21:44:57 crc kubenswrapper[5014]: I1006 21:44:57.403860 5014 generic.go:334] "Generic (PLEG): container finished" podID="6268ffa5-522f-435a-b01d-dec42e54da42" containerID="eba68c6d9e2388b399fe9acca7fecfb4621d92a616963ae63113c3b1a42277b8" exitCode=0 Oct 06 21:44:57 crc kubenswrapper[5014]: I1006 21:44:57.403923 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jvrkm" event={"ID":"6268ffa5-522f-435a-b01d-dec42e54da42","Type":"ContainerDied","Data":"eba68c6d9e2388b399fe9acca7fecfb4621d92a616963ae63113c3b1a42277b8"} Oct 06 21:44:58 crc kubenswrapper[5014]: I1006 21:44:58.415053 5014 generic.go:334] "Generic (PLEG): container finished" podID="6268ffa5-522f-435a-b01d-dec42e54da42" containerID="337c9764a2374e2a267a9a6c2b53a1f5ef10ffc2e247803c140c782bd86368dd" exitCode=0 Oct 06 21:44:58 crc kubenswrapper[5014]: I1006 21:44:58.415115 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jvrkm" event={"ID":"6268ffa5-522f-435a-b01d-dec42e54da42","Type":"ContainerDied","Data":"337c9764a2374e2a267a9a6c2b53a1f5ef10ffc2e247803c140c782bd86368dd"} Oct 06 21:44:59 crc kubenswrapper[5014]: I1006 21:44:59.427462 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jvrkm" event={"ID":"6268ffa5-522f-435a-b01d-dec42e54da42","Type":"ContainerStarted","Data":"42d140e7714451dc5cc7ede40f6f56d13c81d63e5d8cb0de7d4959a8f6271285"} Oct 06 21:44:59 crc kubenswrapper[5014]: I1006 21:44:59.427898 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jvrkm" event={"ID":"6268ffa5-522f-435a-b01d-dec42e54da42","Type":"ContainerStarted","Data":"26d5935890cd3e1ef47a972b0cacdc7ceb5a2853b7e10d0bdf705a99d394ff1f"} Oct 06 21:44:59 crc kubenswrapper[5014]: I1006 21:44:59.427913 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jvrkm" event={"ID":"6268ffa5-522f-435a-b01d-dec42e54da42","Type":"ContainerStarted","Data":"ebfcf1327a587d0d5644a8dbb71dfd44ac3c5da54dfe69bb8f8f5ff2817d5489"} Oct 06 21:44:59 crc kubenswrapper[5014]: I1006 21:44:59.427924 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="metallb-system/frr-k8s-jvrkm" event={"ID":"6268ffa5-522f-435a-b01d-dec42e54da42","Type":"ContainerStarted","Data":"034a99bf613f8a8b1745ce659729ad99e68530bb0fb5174623ed7260084a22eb"} Oct 06 21:44:59 crc kubenswrapper[5014]: I1006 21:44:59.427935 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jvrkm" event={"ID":"6268ffa5-522f-435a-b01d-dec42e54da42","Type":"ContainerStarted","Data":"e85973aa7c1cdb647a0f0abec4929608a97d805611b150fd9c7beae03bc5d4d0"} Oct 06 21:44:59 crc kubenswrapper[5014]: I1006 21:44:59.455129 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-blzvr" Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.216111 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44"] Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.217308 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44" Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.220426 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.220953 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.233348 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44"] Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.345318 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcd6g\" (UniqueName: \"kubernetes.io/projected/e81f009f-1219-4bfc-8516-e68492370963-kube-api-access-zcd6g\") pod \"collect-profiles-29329785-6xg44\" (UID: \"e81f009f-1219-4bfc-8516-e68492370963\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44" Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.345394 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e81f009f-1219-4bfc-8516-e68492370963-secret-volume\") pod \"collect-profiles-29329785-6xg44\" (UID: \"e81f009f-1219-4bfc-8516-e68492370963\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44" Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.345444 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e81f009f-1219-4bfc-8516-e68492370963-config-volume\") pod \"collect-profiles-29329785-6xg44\" (UID: \"e81f009f-1219-4bfc-8516-e68492370963\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44" Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.436363 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-jvrkm" event={"ID":"6268ffa5-522f-435a-b01d-dec42e54da42","Type":"ContainerStarted","Data":"a46d8f75c0e5b8e803d1bea664b589c4647a65ecae177445044ff9200ee3489b"} Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.436573 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-jvrkm" Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.446866 5014 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcd6g\" (UniqueName: \"kubernetes.io/projected/e81f009f-1219-4bfc-8516-e68492370963-kube-api-access-zcd6g\") pod \"collect-profiles-29329785-6xg44\" (UID: \"e81f009f-1219-4bfc-8516-e68492370963\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44" Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.446917 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e81f009f-1219-4bfc-8516-e68492370963-secret-volume\") pod \"collect-profiles-29329785-6xg44\" (UID: \"e81f009f-1219-4bfc-8516-e68492370963\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44" Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.446963 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e81f009f-1219-4bfc-8516-e68492370963-config-volume\") pod \"collect-profiles-29329785-6xg44\" (UID: \"e81f009f-1219-4bfc-8516-e68492370963\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44" Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.447943 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e81f009f-1219-4bfc-8516-e68492370963-config-volume\") pod \"collect-profiles-29329785-6xg44\" (UID: \"e81f009f-1219-4bfc-8516-e68492370963\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44" Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.453299 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e81f009f-1219-4bfc-8516-e68492370963-secret-volume\") pod \"collect-profiles-29329785-6xg44\" (UID: \"e81f009f-1219-4bfc-8516-e68492370963\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44" Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.462880 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-jvrkm" podStartSLOduration=6.383734061 podStartE2EDuration="13.46285879s" podCreationTimestamp="2025-10-06 21:44:47 +0000 UTC" firstStartedPulling="2025-10-06 21:44:48.674063037 +0000 UTC m=+833.967099771" lastFinishedPulling="2025-10-06 21:44:55.753187726 +0000 UTC m=+841.046224500" observedRunningTime="2025-10-06 21:45:00.459445172 +0000 UTC m=+845.752481906" watchObservedRunningTime="2025-10-06 21:45:00.46285879 +0000 UTC m=+845.755895524" Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.469205 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcd6g\" (UniqueName: \"kubernetes.io/projected/e81f009f-1219-4bfc-8516-e68492370963-kube-api-access-zcd6g\") pod \"collect-profiles-29329785-6xg44\" (UID: \"e81f009f-1219-4bfc-8516-e68492370963\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44" Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.496351 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ktlpw"] Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.497757 5014 util.go:30] "No sandbox for pod can be found. 
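[Annotation] The frr-k8s-jvrkm sequence above, with three containers exiting 0 one after another (21:44:56 through 21:44:58) before five ContainerStarted events at 21:44:59, is consistent with init containers completing in order and the main containers then starting together. The PLEG ("pod lifecycle event generator") turns each observed container state change into an event that the sync loop handles; a schematic of that dispatch, using an illustrative event type rather than the kubelet's actual struct:

package main

import "fmt"

// plegEvent is an illustrative stand-in for the events logged above
// as "SyncLoop (PLEG): event for pod".
type plegEvent struct {
	Pod  string
	Type string // "ContainerStarted" or "ContainerDied"
	Data string // container (or sandbox) ID
}

// handle mirrors the sync loop's reaction: any event triggers a pod
// sync, which starts the next container once an init container exits 0.
func handle(e plegEvent) {
	switch e.Type {
	case "ContainerStarted":
		fmt.Printf("pod %s: container %s is running\n", e.Pod, e.Data)
	case "ContainerDied":
		fmt.Printf("pod %s: container %s finished; re-sync pod\n", e.Pod, e.Data)
	}
}

func main() {
	handle(plegEvent{
		Pod:  "metallb-system/frr-k8s-jvrkm",
		Type: "ContainerDied",
		Data: "a534aad1b78ec50036c49f4545260f6987a2af393bf2114ce26416436eb75d74",
	})
}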
Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.514153 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ktlpw"]
Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.539901 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44"
Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.650790 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94d775ee-c348-4654-a726-01715e16dc1f-catalog-content\") pod \"community-operators-ktlpw\" (UID: \"94d775ee-c348-4654-a726-01715e16dc1f\") " pod="openshift-marketplace/community-operators-ktlpw"
Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.651268 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7x79j\" (UniqueName: \"kubernetes.io/projected/94d775ee-c348-4654-a726-01715e16dc1f-kube-api-access-7x79j\") pod \"community-operators-ktlpw\" (UID: \"94d775ee-c348-4654-a726-01715e16dc1f\") " pod="openshift-marketplace/community-operators-ktlpw"
Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.651442 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94d775ee-c348-4654-a726-01715e16dc1f-utilities\") pod \"community-operators-ktlpw\" (UID: \"94d775ee-c348-4654-a726-01715e16dc1f\") " pod="openshift-marketplace/community-operators-ktlpw"
Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.752542 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94d775ee-c348-4654-a726-01715e16dc1f-catalog-content\") pod \"community-operators-ktlpw\" (UID: \"94d775ee-c348-4654-a726-01715e16dc1f\") " pod="openshift-marketplace/community-operators-ktlpw"
Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.752654 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7x79j\" (UniqueName: \"kubernetes.io/projected/94d775ee-c348-4654-a726-01715e16dc1f-kube-api-access-7x79j\") pod \"community-operators-ktlpw\" (UID: \"94d775ee-c348-4654-a726-01715e16dc1f\") " pod="openshift-marketplace/community-operators-ktlpw"
Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.752752 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94d775ee-c348-4654-a726-01715e16dc1f-utilities\") pod \"community-operators-ktlpw\" (UID: \"94d775ee-c348-4654-a726-01715e16dc1f\") " pod="openshift-marketplace/community-operators-ktlpw"
Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.753165 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94d775ee-c348-4654-a726-01715e16dc1f-catalog-content\") pod \"community-operators-ktlpw\" (UID: \"94d775ee-c348-4654-a726-01715e16dc1f\") " pod="openshift-marketplace/community-operators-ktlpw"
Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.753996 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94d775ee-c348-4654-a726-01715e16dc1f-utilities\") pod \"community-operators-ktlpw\" (UID: \"94d775ee-c348-4654-a726-01715e16dc1f\") " pod="openshift-marketplace/community-operators-ktlpw"
Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.777744 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7x79j\" (UniqueName: \"kubernetes.io/projected/94d775ee-c348-4654-a726-01715e16dc1f-kube-api-access-7x79j\") pod \"community-operators-ktlpw\" (UID: \"94d775ee-c348-4654-a726-01715e16dc1f\") " pod="openshift-marketplace/community-operators-ktlpw"
Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.817859 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ktlpw"
Oct 06 21:45:00 crc kubenswrapper[5014]: I1006 21:45:00.991590 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44"]
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.067401 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ktlpw"]
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.302316 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"]
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.303364 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.306463 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.314910 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"]
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.457864 5014 generic.go:334] "Generic (PLEG): container finished" podID="e81f009f-1219-4bfc-8516-e68492370963" containerID="1010ad625e7f41bf996aecce90637779585df2f8e573b1ecb843256705457d18" exitCode=0
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.458090 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44" event={"ID":"e81f009f-1219-4bfc-8516-e68492370963","Type":"ContainerDied","Data":"1010ad625e7f41bf996aecce90637779585df2f8e573b1ecb843256705457d18"}
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.459331 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44" event={"ID":"e81f009f-1219-4bfc-8516-e68492370963","Type":"ContainerStarted","Data":"952892a0e3e292fd2dcdf86b12199efbaa82a5c8ac7c9330235784d3a7995885"}
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.463437 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-bundle\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8\" (UID: \"5fc5c3ee-98c8-494d-9d3a-410a290a68b0\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.463577 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-util\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8\" (UID: \"5fc5c3ee-98c8-494d-9d3a-410a290a68b0\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.463675 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rr78\" (UniqueName: \"kubernetes.io/projected/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-kube-api-access-7rr78\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8\" (UID: \"5fc5c3ee-98c8-494d-9d3a-410a290a68b0\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.467635 5014 generic.go:334] "Generic (PLEG): container finished" podID="94d775ee-c348-4654-a726-01715e16dc1f" containerID="74237af95515eee647e8a610205724a5742cedaca96f9be9648ea525c73f4eee" exitCode=0
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.468638 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ktlpw" event={"ID":"94d775ee-c348-4654-a726-01715e16dc1f","Type":"ContainerDied","Data":"74237af95515eee647e8a610205724a5742cedaca96f9be9648ea525c73f4eee"}
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.468709 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ktlpw" event={"ID":"94d775ee-c348-4654-a726-01715e16dc1f","Type":"ContainerStarted","Data":"1571ae1c1b42cd59c03ab8dac94d5d9a582cd7294bc3da078372c6b3d0184e13"}
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.567153 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rr78\" (UniqueName: \"kubernetes.io/projected/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-kube-api-access-7rr78\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8\" (UID: \"5fc5c3ee-98c8-494d-9d3a-410a290a68b0\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.567259 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-bundle\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8\" (UID: \"5fc5c3ee-98c8-494d-9d3a-410a290a68b0\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.567317 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-util\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8\" (UID: \"5fc5c3ee-98c8-494d-9d3a-410a290a68b0\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.568197 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-util\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8\" (UID: \"5fc5c3ee-98c8-494d-9d3a-410a290a68b0\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.569557 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-bundle\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8\" (UID: \"5fc5c3ee-98c8-494d-9d3a-410a290a68b0\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.583000 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rr78\" (UniqueName: \"kubernetes.io/projected/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-kube-api-access-7rr78\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8\" (UID: \"5fc5c3ee-98c8-494d-9d3a-410a290a68b0\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"
Oct 06 21:45:01 crc kubenswrapper[5014]: I1006 21:45:01.630765 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"
Oct 06 21:45:02 crc kubenswrapper[5014]: I1006 21:45:02.027433 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"]
Oct 06 21:45:02 crc kubenswrapper[5014]: I1006 21:45:02.476760 5014 generic.go:334] "Generic (PLEG): container finished" podID="5fc5c3ee-98c8-494d-9d3a-410a290a68b0" containerID="f15d49d0197557e3a0004663acc6c984e9ab8c3784c607b764bf4364e64b1c98" exitCode=0
Oct 06 21:45:02 crc kubenswrapper[5014]: I1006 21:45:02.476811 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8" event={"ID":"5fc5c3ee-98c8-494d-9d3a-410a290a68b0","Type":"ContainerDied","Data":"f15d49d0197557e3a0004663acc6c984e9ab8c3784c607b764bf4364e64b1c98"}
Oct 06 21:45:02 crc kubenswrapper[5014]: I1006 21:45:02.477171 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8" event={"ID":"5fc5c3ee-98c8-494d-9d3a-410a290a68b0","Type":"ContainerStarted","Data":"acf62887037a6bfdc76ca6f42249189f6971e3e71277137dd3c11ff17c34efd9"}
Oct 06 21:45:02 crc kubenswrapper[5014]: I1006 21:45:02.732471 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44"
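[Annotation] Judging by its container names ("pull", then "extract") and volumes, the pod with the long hash name appears to be an OLM bundle-unpack job: both containers run to completion while sharing the emptyDir volumes "bundle" and "util" mounted above. Unlike the secret-backed volumes earlier in the log, an emptyDir mount depends on no external API object, which is why every SetUp here succeeds on the first attempt. A sketch of that shared-volume layout using the Kubernetes API types (assumes k8s.io/api is available on the module path; the mount paths are illustrative):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Two emptyDir volumes, as in the "bundle"/"util" mounts above.
	vols := []corev1.Volume{
		{Name: "bundle", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
		{Name: "util", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
	}
	// Both job containers mount the same "bundle" volume, so whatever
	// the pull step writes is visible to the extract step.
	shared := corev1.VolumeMount{Name: "bundle", MountPath: "/bundle"}
	pull := corev1.Container{Name: "pull", VolumeMounts: []corev1.VolumeMount{shared}}
	extract := corev1.Container{Name: "extract", VolumeMounts: []corev1.VolumeMount{shared}}

	fmt.Println(vols[0].Name, vols[1].Name, pull.Name, extract.Name)
}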
Oct 06 21:45:02 crc kubenswrapper[5014]: I1006 21:45:02.890130 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e81f009f-1219-4bfc-8516-e68492370963-secret-volume\") pod \"e81f009f-1219-4bfc-8516-e68492370963\" (UID: \"e81f009f-1219-4bfc-8516-e68492370963\") "
Oct 06 21:45:02 crc kubenswrapper[5014]: I1006 21:45:02.890417 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e81f009f-1219-4bfc-8516-e68492370963-config-volume\") pod \"e81f009f-1219-4bfc-8516-e68492370963\" (UID: \"e81f009f-1219-4bfc-8516-e68492370963\") "
Oct 06 21:45:02 crc kubenswrapper[5014]: I1006 21:45:02.890569 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zcd6g\" (UniqueName: \"kubernetes.io/projected/e81f009f-1219-4bfc-8516-e68492370963-kube-api-access-zcd6g\") pod \"e81f009f-1219-4bfc-8516-e68492370963\" (UID: \"e81f009f-1219-4bfc-8516-e68492370963\") "
Oct 06 21:45:02 crc kubenswrapper[5014]: I1006 21:45:02.891329 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e81f009f-1219-4bfc-8516-e68492370963-config-volume" (OuterVolumeSpecName: "config-volume") pod "e81f009f-1219-4bfc-8516-e68492370963" (UID: "e81f009f-1219-4bfc-8516-e68492370963"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:45:02 crc kubenswrapper[5014]: I1006 21:45:02.911795 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e81f009f-1219-4bfc-8516-e68492370963-kube-api-access-zcd6g" (OuterVolumeSpecName: "kube-api-access-zcd6g") pod "e81f009f-1219-4bfc-8516-e68492370963" (UID: "e81f009f-1219-4bfc-8516-e68492370963"). InnerVolumeSpecName "kube-api-access-zcd6g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:45:02 crc kubenswrapper[5014]: I1006 21:45:02.921246 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e81f009f-1219-4bfc-8516-e68492370963-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e81f009f-1219-4bfc-8516-e68492370963" (UID: "e81f009f-1219-4bfc-8516-e68492370963"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:45:02 crc kubenswrapper[5014]: I1006 21:45:02.992750 5014 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e81f009f-1219-4bfc-8516-e68492370963-config-volume\") on node \"crc\" DevicePath \"\""
Oct 06 21:45:02 crc kubenswrapper[5014]: I1006 21:45:02.992784 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zcd6g\" (UniqueName: \"kubernetes.io/projected/e81f009f-1219-4bfc-8516-e68492370963-kube-api-access-zcd6g\") on node \"crc\" DevicePath \"\""
Oct 06 21:45:02 crc kubenswrapper[5014]: I1006 21:45:02.992797 5014 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e81f009f-1219-4bfc-8516-e68492370963-secret-volume\") on node \"crc\" DevicePath \"\""
Oct 06 21:45:03 crc kubenswrapper[5014]: I1006 21:45:03.485108 5014 generic.go:334] "Generic (PLEG): container finished" podID="94d775ee-c348-4654-a726-01715e16dc1f" containerID="f5be7d8d0fcaba17cfb29da7d6d103eefe5bf65340c303bf9c3213152956ef90" exitCode=0
Oct 06 21:45:03 crc kubenswrapper[5014]: I1006 21:45:03.488634 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44"
Oct 06 21:45:03 crc kubenswrapper[5014]: I1006 21:45:03.495559 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-jvrkm"
Oct 06 21:45:03 crc kubenswrapper[5014]: I1006 21:45:03.495590 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ktlpw" event={"ID":"94d775ee-c348-4654-a726-01715e16dc1f","Type":"ContainerDied","Data":"f5be7d8d0fcaba17cfb29da7d6d103eefe5bf65340c303bf9c3213152956ef90"}
Oct 06 21:45:03 crc kubenswrapper[5014]: I1006 21:45:03.495610 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44" event={"ID":"e81f009f-1219-4bfc-8516-e68492370963","Type":"ContainerDied","Data":"952892a0e3e292fd2dcdf86b12199efbaa82a5c8ac7c9330235784d3a7995885"}
Oct 06 21:45:03 crc kubenswrapper[5014]: I1006 21:45:03.495642 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="952892a0e3e292fd2dcdf86b12199efbaa82a5c8ac7c9330235784d3a7995885"
Oct 06 21:45:03 crc kubenswrapper[5014]: I1006 21:45:03.529146 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-jvrkm"
Oct 06 21:45:04 crc kubenswrapper[5014]: I1006 21:45:04.498444 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ktlpw" event={"ID":"94d775ee-c348-4654-a726-01715e16dc1f","Type":"ContainerStarted","Data":"207f4445c2d7c542fad3ab8ec142fb607a5ef57259d78dbece0234381674a0fe"}
Oct 06 21:45:04 crc kubenswrapper[5014]: I1006 21:45:04.518382 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ktlpw" podStartSLOduration=1.93283068 podStartE2EDuration="4.518365251s" podCreationTimestamp="2025-10-06 21:45:00 +0000 UTC" firstStartedPulling="2025-10-06 21:45:01.470055211 +0000 UTC m=+846.763091945" lastFinishedPulling="2025-10-06 21:45:04.055589772 +0000 UTC m=+849.348626516" observedRunningTime="2025-10-06 21:45:04.516408999 +0000 UTC m=+849.809445753" watchObservedRunningTime="2025-10-06 21:45:04.518365251 +0000 UTC m=+849.811401985"
Oct 06 21:45:06 crc kubenswrapper[5014]: I1006 21:45:06.513592 5014 generic.go:334] "Generic (PLEG): container finished" podID="5fc5c3ee-98c8-494d-9d3a-410a290a68b0" containerID="c3371115c0203808c3edcbe6f33f0c2e78e8a8ab09c9e606433e3607bcbece66" exitCode=0
Oct 06 21:45:06 crc kubenswrapper[5014]: I1006 21:45:06.513700 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8" event={"ID":"5fc5c3ee-98c8-494d-9d3a-410a290a68b0","Type":"ContainerDied","Data":"c3371115c0203808c3edcbe6f33f0c2e78e8a8ab09c9e606433e3607bcbece66"}
Oct 06 21:45:07 crc kubenswrapper[5014]: I1006 21:45:07.524165 5014 generic.go:334] "Generic (PLEG): container finished" podID="5fc5c3ee-98c8-494d-9d3a-410a290a68b0" containerID="1e280e4d432fc14a40c2cb360206b0708ef09c5b265fc4fae0c0f26544332f04" exitCode=0
Oct 06 21:45:07 crc kubenswrapper[5014]: I1006 21:45:07.524271 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8" event={"ID":"5fc5c3ee-98c8-494d-9d3a-410a290a68b0","Type":"ContainerDied","Data":"1e280e4d432fc14a40c2cb360206b0708ef09c5b265fc4fae0c0f26544332f04"}
Oct 06 21:45:07 crc kubenswrapper[5014]: I1006 21:45:07.976271 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-68d546b9d8-5tmn6"
Oct 06 21:45:08 crc kubenswrapper[5014]: I1006 21:45:08.492233 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-jvrkm"
Oct 06 21:45:08 crc kubenswrapper[5014]: I1006 21:45:08.521797 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-5xzfl"
Oct 06 21:45:08 crc kubenswrapper[5014]: I1006 21:45:08.925500 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"
Oct 06 21:45:08 crc kubenswrapper[5014]: I1006 21:45:08.983039 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-util\") pod \"5fc5c3ee-98c8-494d-9d3a-410a290a68b0\" (UID: \"5fc5c3ee-98c8-494d-9d3a-410a290a68b0\") "
Oct 06 21:45:08 crc kubenswrapper[5014]: I1006 21:45:08.983166 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7rr78\" (UniqueName: \"kubernetes.io/projected/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-kube-api-access-7rr78\") pod \"5fc5c3ee-98c8-494d-9d3a-410a290a68b0\" (UID: \"5fc5c3ee-98c8-494d-9d3a-410a290a68b0\") "
Oct 06 21:45:08 crc kubenswrapper[5014]: I1006 21:45:08.983204 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-bundle\") pod \"5fc5c3ee-98c8-494d-9d3a-410a290a68b0\" (UID: \"5fc5c3ee-98c8-494d-9d3a-410a290a68b0\") "
Oct 06 21:45:08 crc kubenswrapper[5014]: I1006 21:45:08.984335 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-bundle" (OuterVolumeSpecName: "bundle") pod "5fc5c3ee-98c8-494d-9d3a-410a290a68b0" (UID: "5fc5c3ee-98c8-494d-9d3a-410a290a68b0"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:45:08 crc kubenswrapper[5014]: I1006 21:45:08.994298 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-kube-api-access-7rr78" (OuterVolumeSpecName: "kube-api-access-7rr78") pod "5fc5c3ee-98c8-494d-9d3a-410a290a68b0" (UID: "5fc5c3ee-98c8-494d-9d3a-410a290a68b0"). InnerVolumeSpecName "kube-api-access-7rr78". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:45:09 crc kubenswrapper[5014]: I1006 21:45:09.002781 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-util" (OuterVolumeSpecName: "util") pod "5fc5c3ee-98c8-494d-9d3a-410a290a68b0" (UID: "5fc5c3ee-98c8-494d-9d3a-410a290a68b0"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:45:09 crc kubenswrapper[5014]: I1006 21:45:09.085154 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7rr78\" (UniqueName: \"kubernetes.io/projected/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-kube-api-access-7rr78\") on node \"crc\" DevicePath \"\""
Oct 06 21:45:09 crc kubenswrapper[5014]: I1006 21:45:09.085634 5014 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-bundle\") on node \"crc\" DevicePath \"\""
Oct 06 21:45:09 crc kubenswrapper[5014]: I1006 21:45:09.085649 5014 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5fc5c3ee-98c8-494d-9d3a-410a290a68b0-util\") on node \"crc\" DevicePath \"\""
Oct 06 21:45:09 crc kubenswrapper[5014]: I1006 21:45:09.540223 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8" event={"ID":"5fc5c3ee-98c8-494d-9d3a-410a290a68b0","Type":"ContainerDied","Data":"acf62887037a6bfdc76ca6f42249189f6971e3e71277137dd3c11ff17c34efd9"}
Oct 06 21:45:09 crc kubenswrapper[5014]: I1006 21:45:09.540274 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="acf62887037a6bfdc76ca6f42249189f6971e3e71277137dd3c11ff17c34efd9"
Oct 06 21:45:09 crc kubenswrapper[5014]: I1006 21:45:09.540545 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8"
Need to start a new one" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8" Oct 06 21:45:10 crc kubenswrapper[5014]: I1006 21:45:10.819036 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ktlpw" Oct 06 21:45:10 crc kubenswrapper[5014]: I1006 21:45:10.819119 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ktlpw" Oct 06 21:45:10 crc kubenswrapper[5014]: I1006 21:45:10.891955 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ktlpw" Oct 06 21:45:11 crc kubenswrapper[5014]: I1006 21:45:11.624514 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ktlpw" Oct 06 21:45:13 crc kubenswrapper[5014]: I1006 21:45:13.263392 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ktlpw"] Oct 06 21:45:13 crc kubenswrapper[5014]: I1006 21:45:13.567217 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ktlpw" podUID="94d775ee-c348-4654-a726-01715e16dc1f" containerName="registry-server" containerID="cri-o://207f4445c2d7c542fad3ab8ec142fb607a5ef57259d78dbece0234381674a0fe" gracePeriod=2 Oct 06 21:45:13 crc kubenswrapper[5014]: I1006 21:45:13.961875 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-jnn42"] Oct 06 21:45:13 crc kubenswrapper[5014]: E1006 21:45:13.962456 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e81f009f-1219-4bfc-8516-e68492370963" containerName="collect-profiles" Oct 06 21:45:13 crc kubenswrapper[5014]: I1006 21:45:13.962471 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="e81f009f-1219-4bfc-8516-e68492370963" containerName="collect-profiles" Oct 06 21:45:13 crc kubenswrapper[5014]: E1006 21:45:13.962489 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fc5c3ee-98c8-494d-9d3a-410a290a68b0" containerName="pull" Oct 06 21:45:13 crc kubenswrapper[5014]: I1006 21:45:13.962497 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fc5c3ee-98c8-494d-9d3a-410a290a68b0" containerName="pull" Oct 06 21:45:13 crc kubenswrapper[5014]: E1006 21:45:13.964469 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fc5c3ee-98c8-494d-9d3a-410a290a68b0" containerName="util" Oct 06 21:45:13 crc kubenswrapper[5014]: I1006 21:45:13.964495 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fc5c3ee-98c8-494d-9d3a-410a290a68b0" containerName="util" Oct 06 21:45:13 crc kubenswrapper[5014]: E1006 21:45:13.964518 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fc5c3ee-98c8-494d-9d3a-410a290a68b0" containerName="extract" Oct 06 21:45:13 crc kubenswrapper[5014]: I1006 21:45:13.964527 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fc5c3ee-98c8-494d-9d3a-410a290a68b0" containerName="extract" Oct 06 21:45:13 crc kubenswrapper[5014]: I1006 21:45:13.964720 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5fc5c3ee-98c8-494d-9d3a-410a290a68b0" containerName="extract" Oct 06 21:45:13 crc kubenswrapper[5014]: I1006 21:45:13.964748 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="e81f009f-1219-4bfc-8516-e68492370963" containerName="collect-profiles" 
Oct 06 21:45:13 crc kubenswrapper[5014]: I1006 21:45:13.965224 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-jnn42"
Oct 06 21:45:13 crc kubenswrapper[5014]: I1006 21:45:13.967132 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt"
Oct 06 21:45:13 crc kubenswrapper[5014]: I1006 21:45:13.967360 5014 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-j99nm"
Oct 06 21:45:13 crc kubenswrapper[5014]: I1006 21:45:13.967844 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt"
Oct 06 21:45:13 crc kubenswrapper[5014]: I1006 21:45:13.980495 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-jnn42"]
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.004274 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ktlpw"
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.051225 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7x79j\" (UniqueName: \"kubernetes.io/projected/94d775ee-c348-4654-a726-01715e16dc1f-kube-api-access-7x79j\") pod \"94d775ee-c348-4654-a726-01715e16dc1f\" (UID: \"94d775ee-c348-4654-a726-01715e16dc1f\") "
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.051277 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94d775ee-c348-4654-a726-01715e16dc1f-catalog-content\") pod \"94d775ee-c348-4654-a726-01715e16dc1f\" (UID: \"94d775ee-c348-4654-a726-01715e16dc1f\") "
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.052160 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94d775ee-c348-4654-a726-01715e16dc1f-utilities" (OuterVolumeSpecName: "utilities") pod "94d775ee-c348-4654-a726-01715e16dc1f" (UID: "94d775ee-c348-4654-a726-01715e16dc1f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.058391 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94d775ee-c348-4654-a726-01715e16dc1f-kube-api-access-7x79j" (OuterVolumeSpecName: "kube-api-access-7x79j") pod "94d775ee-c348-4654-a726-01715e16dc1f" (UID: "94d775ee-c348-4654-a726-01715e16dc1f"). InnerVolumeSpecName "kube-api-access-7x79j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.051298 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94d775ee-c348-4654-a726-01715e16dc1f-utilities\") pod \"94d775ee-c348-4654-a726-01715e16dc1f\" (UID: \"94d775ee-c348-4654-a726-01715e16dc1f\") "
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.065118 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knqxh\" (UniqueName: \"kubernetes.io/projected/a0fd8663-a7dd-4ade-9300-8df14361e873-kube-api-access-knqxh\") pod \"cert-manager-operator-controller-manager-57cd46d6d-jnn42\" (UID: \"a0fd8663-a7dd-4ade-9300-8df14361e873\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-jnn42"
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.065381 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7x79j\" (UniqueName: \"kubernetes.io/projected/94d775ee-c348-4654-a726-01715e16dc1f-kube-api-access-7x79j\") on node \"crc\" DevicePath \"\""
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.065400 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94d775ee-c348-4654-a726-01715e16dc1f-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.101093 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94d775ee-c348-4654-a726-01715e16dc1f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "94d775ee-c348-4654-a726-01715e16dc1f" (UID: "94d775ee-c348-4654-a726-01715e16dc1f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.166273 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knqxh\" (UniqueName: \"kubernetes.io/projected/a0fd8663-a7dd-4ade-9300-8df14361e873-kube-api-access-knqxh\") pod \"cert-manager-operator-controller-manager-57cd46d6d-jnn42\" (UID: \"a0fd8663-a7dd-4ade-9300-8df14361e873\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-jnn42"
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.166438 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94d775ee-c348-4654-a726-01715e16dc1f-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.184673 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knqxh\" (UniqueName: \"kubernetes.io/projected/a0fd8663-a7dd-4ade-9300-8df14361e873-kube-api-access-knqxh\") pod \"cert-manager-operator-controller-manager-57cd46d6d-jnn42\" (UID: \"a0fd8663-a7dd-4ade-9300-8df14361e873\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-jnn42"
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.324876 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-jnn42"
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.575022 5014 generic.go:334] "Generic (PLEG): container finished" podID="94d775ee-c348-4654-a726-01715e16dc1f" containerID="207f4445c2d7c542fad3ab8ec142fb607a5ef57259d78dbece0234381674a0fe" exitCode=0
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.575072 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ktlpw" event={"ID":"94d775ee-c348-4654-a726-01715e16dc1f","Type":"ContainerDied","Data":"207f4445c2d7c542fad3ab8ec142fb607a5ef57259d78dbece0234381674a0fe"}
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.575364 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ktlpw" event={"ID":"94d775ee-c348-4654-a726-01715e16dc1f","Type":"ContainerDied","Data":"1571ae1c1b42cd59c03ab8dac94d5d9a582cd7294bc3da078372c6b3d0184e13"}
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.575403 5014 scope.go:117] "RemoveContainer" containerID="207f4445c2d7c542fad3ab8ec142fb607a5ef57259d78dbece0234381674a0fe"
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.575106 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ktlpw"
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.594861 5014 scope.go:117] "RemoveContainer" containerID="f5be7d8d0fcaba17cfb29da7d6d103eefe5bf65340c303bf9c3213152956ef90"
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.608778 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ktlpw"]
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.612823 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ktlpw"]
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.627782 5014 scope.go:117] "RemoveContainer" containerID="74237af95515eee647e8a610205724a5742cedaca96f9be9648ea525c73f4eee"
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.650478 5014 scope.go:117] "RemoveContainer" containerID="207f4445c2d7c542fad3ab8ec142fb607a5ef57259d78dbece0234381674a0fe"
Oct 06 21:45:14 crc kubenswrapper[5014]: E1006 21:45:14.654767 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"207f4445c2d7c542fad3ab8ec142fb607a5ef57259d78dbece0234381674a0fe\": container with ID starting with 207f4445c2d7c542fad3ab8ec142fb607a5ef57259d78dbece0234381674a0fe not found: ID does not exist" containerID="207f4445c2d7c542fad3ab8ec142fb607a5ef57259d78dbece0234381674a0fe"
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.654827 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"207f4445c2d7c542fad3ab8ec142fb607a5ef57259d78dbece0234381674a0fe"} err="failed to get container status \"207f4445c2d7c542fad3ab8ec142fb607a5ef57259d78dbece0234381674a0fe\": rpc error: code = NotFound desc = could not find container \"207f4445c2d7c542fad3ab8ec142fb607a5ef57259d78dbece0234381674a0fe\": container with ID starting with 207f4445c2d7c542fad3ab8ec142fb607a5ef57259d78dbece0234381674a0fe not found: ID does not exist"
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.654860 5014 scope.go:117] "RemoveContainer" containerID="f5be7d8d0fcaba17cfb29da7d6d103eefe5bf65340c303bf9c3213152956ef90"
Oct 06 21:45:14 crc kubenswrapper[5014]: E1006 21:45:14.655937 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5be7d8d0fcaba17cfb29da7d6d103eefe5bf65340c303bf9c3213152956ef90\": container with ID starting with f5be7d8d0fcaba17cfb29da7d6d103eefe5bf65340c303bf9c3213152956ef90 not found: ID does not exist" containerID="f5be7d8d0fcaba17cfb29da7d6d103eefe5bf65340c303bf9c3213152956ef90"
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.655975 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5be7d8d0fcaba17cfb29da7d6d103eefe5bf65340c303bf9c3213152956ef90"} err="failed to get container status \"f5be7d8d0fcaba17cfb29da7d6d103eefe5bf65340c303bf9c3213152956ef90\": rpc error: code = NotFound desc = could not find container \"f5be7d8d0fcaba17cfb29da7d6d103eefe5bf65340c303bf9c3213152956ef90\": container with ID starting with f5be7d8d0fcaba17cfb29da7d6d103eefe5bf65340c303bf9c3213152956ef90 not found: ID does not exist"
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.656252 5014 scope.go:117] "RemoveContainer" containerID="74237af95515eee647e8a610205724a5742cedaca96f9be9648ea525c73f4eee"
Oct 06 21:45:14 crc kubenswrapper[5014]: E1006 21:45:14.663902 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74237af95515eee647e8a610205724a5742cedaca96f9be9648ea525c73f4eee\": container with ID starting with 74237af95515eee647e8a610205724a5742cedaca96f9be9648ea525c73f4eee not found: ID does not exist" containerID="74237af95515eee647e8a610205724a5742cedaca96f9be9648ea525c73f4eee"
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.663958 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74237af95515eee647e8a610205724a5742cedaca96f9be9648ea525c73f4eee"} err="failed to get container status \"74237af95515eee647e8a610205724a5742cedaca96f9be9648ea525c73f4eee\": rpc error: code = NotFound desc = could not find container \"74237af95515eee647e8a610205724a5742cedaca96f9be9648ea525c73f4eee\": container with ID starting with 74237af95515eee647e8a610205724a5742cedaca96f9be9648ea525c73f4eee not found: ID does not exist"
Oct 06 21:45:14 crc kubenswrapper[5014]: I1006 21:45:14.771430 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-jnn42"]
Oct 06 21:45:14 crc kubenswrapper[5014]: W1006 21:45:14.778315 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0fd8663_a7dd_4ade_9300_8df14361e873.slice/crio-9ee1f2d18296de57d3e857103f01f565d6dcba21335a0c05c3254251209f7ac0 WatchSource:0}: Error finding container 9ee1f2d18296de57d3e857103f01f565d6dcba21335a0c05c3254251209f7ac0: Status 404 returned error can't find the container with id 9ee1f2d18296de57d3e857103f01f565d6dcba21335a0c05c3254251209f7ac0
Oct 06 21:45:15 crc kubenswrapper[5014]: I1006 21:45:15.492359 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94d775ee-c348-4654-a726-01715e16dc1f" path="/var/lib/kubelet/pods/94d775ee-c348-4654-a726-01715e16dc1f/volumes"
Oct 06 21:45:15 crc kubenswrapper[5014]: I1006 21:45:15.582883 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-jnn42" event={"ID":"a0fd8663-a7dd-4ade-9300-8df14361e873","Type":"ContainerStarted","Data":"9ee1f2d18296de57d3e857103f01f565d6dcba21335a0c05c3254251209f7ac0"}
Oct 06 21:45:21 crc kubenswrapper[5014]: I1006 21:45:21.637949 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-jnn42" event={"ID":"a0fd8663-a7dd-4ade-9300-8df14361e873","Type":"ContainerStarted","Data":"ce3f73e2168ac91bb0acbb1c884b8d7fa40e2654efb20509c75b814a11181f49"}
Oct 06 21:45:21 crc kubenswrapper[5014]: I1006 21:45:21.662022 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-jnn42" podStartSLOduration=2.308555108 podStartE2EDuration="8.66198764s" podCreationTimestamp="2025-10-06 21:45:13 +0000 UTC" firstStartedPulling="2025-10-06 21:45:14.781803707 +0000 UTC m=+860.074840441" lastFinishedPulling="2025-10-06 21:45:21.135236249 +0000 UTC m=+866.428272973" observedRunningTime="2025-10-06 21:45:21.655867355 +0000 UTC m=+866.948904119" watchObservedRunningTime="2025-10-06 21:45:21.66198764 +0000 UTC m=+866.955024414"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.311229 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-d969966f-5vl2k"]
Oct 06 21:45:26 crc kubenswrapper[5014]: E1006 21:45:26.312143 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94d775ee-c348-4654-a726-01715e16dc1f" containerName="extract-content"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.312156 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="94d775ee-c348-4654-a726-01715e16dc1f" containerName="extract-content"
Oct 06 21:45:26 crc kubenswrapper[5014]: E1006 21:45:26.312181 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94d775ee-c348-4654-a726-01715e16dc1f" containerName="extract-utilities"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.312186 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="94d775ee-c348-4654-a726-01715e16dc1f" containerName="extract-utilities"
Oct 06 21:45:26 crc kubenswrapper[5014]: E1006 21:45:26.312193 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94d775ee-c348-4654-a726-01715e16dc1f" containerName="registry-server"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.312198 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="94d775ee-c348-4654-a726-01715e16dc1f" containerName="registry-server"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.312293 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="94d775ee-c348-4654-a726-01715e16dc1f" containerName="registry-server"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.312711 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-d969966f-5vl2k"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.314842 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.314870 5014 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-ggqlc"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.321801 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.338293 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfzwb\" (UniqueName: \"kubernetes.io/projected/c4bf31a0-2c7c-4936-8f03-928208b80a04-kube-api-access-pfzwb\") pod \"cert-manager-webhook-d969966f-5vl2k\" (UID: \"c4bf31a0-2c7c-4936-8f03-928208b80a04\") " pod="cert-manager/cert-manager-webhook-d969966f-5vl2k"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.338405 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c4bf31a0-2c7c-4936-8f03-928208b80a04-bound-sa-token\") pod \"cert-manager-webhook-d969966f-5vl2k\" (UID: \"c4bf31a0-2c7c-4936-8f03-928208b80a04\") " pod="cert-manager/cert-manager-webhook-d969966f-5vl2k"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.355750 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-d969966f-5vl2k"]
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.439419 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c4bf31a0-2c7c-4936-8f03-928208b80a04-bound-sa-token\") pod \"cert-manager-webhook-d969966f-5vl2k\" (UID: \"c4bf31a0-2c7c-4936-8f03-928208b80a04\") " pod="cert-manager/cert-manager-webhook-d969966f-5vl2k"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.439493 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfzwb\" (UniqueName: \"kubernetes.io/projected/c4bf31a0-2c7c-4936-8f03-928208b80a04-kube-api-access-pfzwb\") pod \"cert-manager-webhook-d969966f-5vl2k\" (UID: \"c4bf31a0-2c7c-4936-8f03-928208b80a04\") " pod="cert-manager/cert-manager-webhook-d969966f-5vl2k"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.460280 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c4bf31a0-2c7c-4936-8f03-928208b80a04-bound-sa-token\") pod \"cert-manager-webhook-d969966f-5vl2k\" (UID: \"c4bf31a0-2c7c-4936-8f03-928208b80a04\") " pod="cert-manager/cert-manager-webhook-d969966f-5vl2k"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.462900 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfzwb\" (UniqueName: \"kubernetes.io/projected/c4bf31a0-2c7c-4936-8f03-928208b80a04-kube-api-access-pfzwb\") pod \"cert-manager-webhook-d969966f-5vl2k\" (UID: \"c4bf31a0-2c7c-4936-8f03-928208b80a04\") " pod="cert-manager/cert-manager-webhook-d969966f-5vl2k"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.627500 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-d969966f-5vl2k"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.885284 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7d9f95dbf-kg5dg"]
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.886352 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7d9f95dbf-kg5dg"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.889000 5014 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-j6r4f"
Oct 06 21:45:26 crc kubenswrapper[5014]: I1006 21:45:26.896251 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7d9f95dbf-kg5dg"]
Oct 06 21:45:27 crc kubenswrapper[5014]: I1006 21:45:27.056662 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nbmz\" (UniqueName: \"kubernetes.io/projected/c2d94b0d-307d-473d-b0dc-441e22f2c606-kube-api-access-8nbmz\") pod \"cert-manager-cainjector-7d9f95dbf-kg5dg\" (UID: \"c2d94b0d-307d-473d-b0dc-441e22f2c606\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-kg5dg"
Oct 06 21:45:27 crc kubenswrapper[5014]: I1006 21:45:27.056727 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c2d94b0d-307d-473d-b0dc-441e22f2c606-bound-sa-token\") pod \"cert-manager-cainjector-7d9f95dbf-kg5dg\" (UID: \"c2d94b0d-307d-473d-b0dc-441e22f2c606\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-kg5dg"
Oct 06 21:45:27 crc kubenswrapper[5014]: I1006 21:45:27.158218 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nbmz\" (UniqueName: \"kubernetes.io/projected/c2d94b0d-307d-473d-b0dc-441e22f2c606-kube-api-access-8nbmz\") pod \"cert-manager-cainjector-7d9f95dbf-kg5dg\" (UID: \"c2d94b0d-307d-473d-b0dc-441e22f2c606\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-kg5dg"
Oct 06 21:45:27 crc kubenswrapper[5014]: I1006 21:45:27.158305 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c2d94b0d-307d-473d-b0dc-441e22f2c606-bound-sa-token\") pod \"cert-manager-cainjector-7d9f95dbf-kg5dg\" (UID: \"c2d94b0d-307d-473d-b0dc-441e22f2c606\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-kg5dg"
Oct 06 21:45:27 crc kubenswrapper[5014]: I1006 21:45:27.165742 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-d969966f-5vl2k"]
Oct 06 21:45:27 crc kubenswrapper[5014]: W1006 21:45:27.187789 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4bf31a0_2c7c_4936_8f03_928208b80a04.slice/crio-65d2f37aa17068f587102ea6f1a740859f507c50049a2a0918c720c164b31582 WatchSource:0}: Error finding container 65d2f37aa17068f587102ea6f1a740859f507c50049a2a0918c720c164b31582: Status 404 returned error can't find the container with id 65d2f37aa17068f587102ea6f1a740859f507c50049a2a0918c720c164b31582
Oct 06 21:45:27 crc kubenswrapper[5014]: I1006 21:45:27.189794 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nbmz\" (UniqueName: \"kubernetes.io/projected/c2d94b0d-307d-473d-b0dc-441e22f2c606-kube-api-access-8nbmz\") pod \"cert-manager-cainjector-7d9f95dbf-kg5dg\" (UID: \"c2d94b0d-307d-473d-b0dc-441e22f2c606\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-kg5dg"
Oct 06 21:45:27 crc kubenswrapper[5014]: I1006 21:45:27.196545 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c2d94b0d-307d-473d-b0dc-441e22f2c606-bound-sa-token\") pod \"cert-manager-cainjector-7d9f95dbf-kg5dg\" (UID: \"c2d94b0d-307d-473d-b0dc-441e22f2c606\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-kg5dg"
Oct 06 21:45:27 crc kubenswrapper[5014]: I1006 21:45:27.208220 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7d9f95dbf-kg5dg"
Oct 06 21:45:27 crc kubenswrapper[5014]: I1006 21:45:27.644558 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7d9f95dbf-kg5dg"]
Oct 06 21:45:27 crc kubenswrapper[5014]: I1006 21:45:27.678813 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-d969966f-5vl2k" event={"ID":"c4bf31a0-2c7c-4936-8f03-928208b80a04","Type":"ContainerStarted","Data":"65d2f37aa17068f587102ea6f1a740859f507c50049a2a0918c720c164b31582"}
Oct 06 21:45:27 crc kubenswrapper[5014]: I1006 21:45:27.679763 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7d9f95dbf-kg5dg" event={"ID":"c2d94b0d-307d-473d-b0dc-441e22f2c606","Type":"ContainerStarted","Data":"85f07fd24be63fd8207313efc0a4f9be9ac5034f31a12c1547e6e5d1e7b7b631"}
Oct 06 21:45:31 crc kubenswrapper[5014]: I1006 21:45:31.710563 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-d969966f-5vl2k" event={"ID":"c4bf31a0-2c7c-4936-8f03-928208b80a04","Type":"ContainerStarted","Data":"b1d2952006762b55bd44582d1f8ee83432b422d17a625a1ba7bcad3c42ca0f1c"}
Oct 06 21:45:31 crc kubenswrapper[5014]: I1006 21:45:31.711210 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-d969966f-5vl2k"
Oct 06 21:45:31 crc kubenswrapper[5014]: I1006 21:45:31.712877 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7d9f95dbf-kg5dg" event={"ID":"c2d94b0d-307d-473d-b0dc-441e22f2c606","Type":"ContainerStarted","Data":"59e14e90e8bef061e17bb8ea2d72f2e4f415c50c7abfcd9388f312d2433e5b73"}
Oct 06 21:45:31 crc kubenswrapper[5014]: I1006 21:45:31.726102 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-d969966f-5vl2k" podStartSLOduration=1.7261728010000001 podStartE2EDuration="5.726080978s" podCreationTimestamp="2025-10-06 21:45:26 +0000 UTC" firstStartedPulling="2025-10-06 21:45:27.195796385 +0000 UTC m=+872.488833139" lastFinishedPulling="2025-10-06 21:45:31.195704542 +0000 UTC m=+876.488741316" observedRunningTime="2025-10-06 21:45:31.722988929 +0000 UTC m=+877.016025663" watchObservedRunningTime="2025-10-06 21:45:31.726080978 +0000 UTC m=+877.019117712"
Oct 06 21:45:31 crc kubenswrapper[5014]: I1006 21:45:31.741322 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7d9f95dbf-kg5dg" podStartSLOduration=2.231020606 podStartE2EDuration="5.74129961s" podCreationTimestamp="2025-10-06 21:45:26 +0000 UTC" firstStartedPulling="2025-10-06 21:45:27.658659398 +0000 UTC m=+872.951696132" lastFinishedPulling="2025-10-06 21:45:31.168938362 +0000 UTC m=+876.461975136" observedRunningTime="2025-10-06 21:45:31.738146531 +0000 UTC m=+877.031183275" watchObservedRunningTime="2025-10-06 21:45:31.74129961 +0000 UTC m=+877.034336344"
Oct 06 21:45:36 crc kubenswrapper[5014]: I1006 21:45:36.631746 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-d969966f-5vl2k"
Oct 06 21:45:44 crc kubenswrapper[5014]: I1006 21:45:44.050549 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-7d4cc89fcb-ttqz7"]
Oct 06 21:45:44 crc kubenswrapper[5014]: I1006 21:45:44.053108 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-7d4cc89fcb-ttqz7"
Oct 06 21:45:44 crc kubenswrapper[5014]: I1006 21:45:44.056199 5014 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-vck7v"
Oct 06 21:45:44 crc kubenswrapper[5014]: I1006 21:45:44.074866 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-7d4cc89fcb-ttqz7"]
Oct 06 21:45:44 crc kubenswrapper[5014]: I1006 21:45:44.131136 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8ddbd27d-bb5c-4d47-90bc-7215f30636aa-bound-sa-token\") pod \"cert-manager-7d4cc89fcb-ttqz7\" (UID: \"8ddbd27d-bb5c-4d47-90bc-7215f30636aa\") " pod="cert-manager/cert-manager-7d4cc89fcb-ttqz7"
Oct 06 21:45:44 crc kubenswrapper[5014]: I1006 21:45:44.131256 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5wvz\" (UniqueName: \"kubernetes.io/projected/8ddbd27d-bb5c-4d47-90bc-7215f30636aa-kube-api-access-p5wvz\") pod \"cert-manager-7d4cc89fcb-ttqz7\" (UID: \"8ddbd27d-bb5c-4d47-90bc-7215f30636aa\") " pod="cert-manager/cert-manager-7d4cc89fcb-ttqz7"
Oct 06 21:45:44 crc kubenswrapper[5014]: I1006 21:45:44.232517 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8ddbd27d-bb5c-4d47-90bc-7215f30636aa-bound-sa-token\") pod \"cert-manager-7d4cc89fcb-ttqz7\" (UID: \"8ddbd27d-bb5c-4d47-90bc-7215f30636aa\") " pod="cert-manager/cert-manager-7d4cc89fcb-ttqz7"
Oct 06 21:45:44 crc kubenswrapper[5014]: I1006 21:45:44.232748 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5wvz\" (UniqueName: \"kubernetes.io/projected/8ddbd27d-bb5c-4d47-90bc-7215f30636aa-kube-api-access-p5wvz\") pod \"cert-manager-7d4cc89fcb-ttqz7\" (UID: \"8ddbd27d-bb5c-4d47-90bc-7215f30636aa\") " pod="cert-manager/cert-manager-7d4cc89fcb-ttqz7"
Oct 06 21:45:44 crc kubenswrapper[5014]: I1006 21:45:44.283889 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5wvz\" (UniqueName: \"kubernetes.io/projected/8ddbd27d-bb5c-4d47-90bc-7215f30636aa-kube-api-access-p5wvz\") pod \"cert-manager-7d4cc89fcb-ttqz7\" (UID: \"8ddbd27d-bb5c-4d47-90bc-7215f30636aa\") " pod="cert-manager/cert-manager-7d4cc89fcb-ttqz7"
Oct 06 21:45:44 crc kubenswrapper[5014]: I1006 21:45:44.284997 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8ddbd27d-bb5c-4d47-90bc-7215f30636aa-bound-sa-token\") pod \"cert-manager-7d4cc89fcb-ttqz7\" (UID: \"8ddbd27d-bb5c-4d47-90bc-7215f30636aa\") " pod="cert-manager/cert-manager-7d4cc89fcb-ttqz7"
Oct 06 21:45:44 crc kubenswrapper[5014]: I1006 21:45:44.416753 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-7d4cc89fcb-ttqz7"
Oct 06 21:45:44 crc kubenswrapper[5014]: I1006 21:45:44.700966 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-7d4cc89fcb-ttqz7"]
Oct 06 21:45:44 crc kubenswrapper[5014]: I1006 21:45:44.800985 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-7d4cc89fcb-ttqz7" event={"ID":"8ddbd27d-bb5c-4d47-90bc-7215f30636aa","Type":"ContainerStarted","Data":"7db3e8328c633045ec03f620400c99084c7d9d6e433ab49d9903da6d570a6c36"}
Oct 06 21:45:45 crc kubenswrapper[5014]: I1006 21:45:45.815046 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-7d4cc89fcb-ttqz7" event={"ID":"8ddbd27d-bb5c-4d47-90bc-7215f30636aa","Type":"ContainerStarted","Data":"bbdbc26d8f6cec4da3fe8849b2358fa8403daba96222f9423857cbcf8d54094c"}
Oct 06 21:45:45 crc kubenswrapper[5014]: I1006 21:45:45.842773 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-7d4cc89fcb-ttqz7" podStartSLOduration=1.842742815 podStartE2EDuration="1.842742815s" podCreationTimestamp="2025-10-06 21:45:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:45:45.840782473 +0000 UTC m=+891.133819237" watchObservedRunningTime="2025-10-06 21:45:45.842742815 +0000 UTC m=+891.135779589"
Oct 06 21:45:50 crc kubenswrapper[5014]: I1006 21:45:50.944999 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-pkx2l"]
Oct 06 21:45:50 crc kubenswrapper[5014]: I1006 21:45:50.946533 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-pkx2l"
Oct 06 21:45:50 crc kubenswrapper[5014]: I1006 21:45:50.949180 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-czrsm"
Oct 06 21:45:50 crc kubenswrapper[5014]: I1006 21:45:50.949393 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Oct 06 21:45:50 crc kubenswrapper[5014]: I1006 21:45:50.950483 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Oct 06 21:45:50 crc kubenswrapper[5014]: I1006 21:45:50.966709 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-pkx2l"]
Oct 06 21:45:51 crc kubenswrapper[5014]: I1006 21:45:51.050936 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7497\" (UniqueName: \"kubernetes.io/projected/ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414-kube-api-access-c7497\") pod \"openstack-operator-index-pkx2l\" (UID: \"ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414\") " pod="openstack-operators/openstack-operator-index-pkx2l"
Oct 06 21:45:51 crc kubenswrapper[5014]: I1006 21:45:51.152361 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7497\" (UniqueName: \"kubernetes.io/projected/ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414-kube-api-access-c7497\") pod \"openstack-operator-index-pkx2l\" (UID: \"ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414\") " pod="openstack-operators/openstack-operator-index-pkx2l"
Oct 06 21:45:51 crc kubenswrapper[5014]: I1006 21:45:51.175170 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7497\" (UniqueName: \"kubernetes.io/projected/ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414-kube-api-access-c7497\") pod \"openstack-operator-index-pkx2l\" (UID: \"ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414\") " pod="openstack-operators/openstack-operator-index-pkx2l"
Oct 06 21:45:51 crc kubenswrapper[5014]: I1006 21:45:51.267713 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-pkx2l"
Oct 06 21:45:51 crc kubenswrapper[5014]: I1006 21:45:51.767765 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-pkx2l"]
Oct 06 21:45:51 crc kubenswrapper[5014]: I1006 21:45:51.868474 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-pkx2l" event={"ID":"ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414","Type":"ContainerStarted","Data":"20306ddedab5742fca3e9d7027eca30924a502872e713a614faf2800315c1204"}
Oct 06 21:45:52 crc kubenswrapper[5014]: I1006 21:45:52.878798 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-pkx2l" event={"ID":"ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414","Type":"ContainerStarted","Data":"8ade8524a6d1c768be3672a8dfc8cdef4fe4774b03dc7ae25a0ab0d1c216c0fb"}
Oct 06 21:45:52 crc kubenswrapper[5014]: I1006 21:45:52.898207 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-pkx2l" podStartSLOduration=2.145845898 podStartE2EDuration="2.898185987s" podCreationTimestamp="2025-10-06 21:45:50 +0000 UTC" firstStartedPulling="2025-10-06 21:45:51.785720777 +0000 UTC m=+897.078757551" lastFinishedPulling="2025-10-06 21:45:52.538060896 +0000 UTC m=+897.831097640" observedRunningTime="2025-10-06 21:45:52.893638822 +0000 UTC m=+898.186675566" watchObservedRunningTime="2025-10-06 21:45:52.898185987 +0000 UTC m=+898.191222721"
Oct 06 21:45:55 crc kubenswrapper[5014]: I1006 21:45:55.106549 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-pkx2l"]
Oct 06 21:45:55 crc kubenswrapper[5014]: I1006 21:45:55.107433 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-pkx2l" podUID="ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414" containerName="registry-server" containerID="cri-o://8ade8524a6d1c768be3672a8dfc8cdef4fe4774b03dc7ae25a0ab0d1c216c0fb" gracePeriod=2
Oct 06 21:45:55 crc kubenswrapper[5014]: I1006 21:45:55.594764 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-pkx2l"
Oct 06 21:45:55 crc kubenswrapper[5014]: I1006 21:45:55.729367 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7497\" (UniqueName: \"kubernetes.io/projected/ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414-kube-api-access-c7497\") pod \"ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414\" (UID: \"ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414\") "
Oct 06 21:45:55 crc kubenswrapper[5014]: I1006 21:45:55.736744 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414-kube-api-access-c7497" (OuterVolumeSpecName: "kube-api-access-c7497") pod "ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414" (UID: "ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414"). InnerVolumeSpecName "kube-api-access-c7497". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:45:55 crc kubenswrapper[5014]: I1006 21:45:55.831973 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7497\" (UniqueName: \"kubernetes.io/projected/ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414-kube-api-access-c7497\") on node \"crc\" DevicePath \"\""
Oct 06 21:45:55 crc kubenswrapper[5014]: I1006 21:45:55.903562 5014 generic.go:334] "Generic (PLEG): container finished" podID="ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414" containerID="8ade8524a6d1c768be3672a8dfc8cdef4fe4774b03dc7ae25a0ab0d1c216c0fb" exitCode=0
Oct 06 21:45:55 crc kubenswrapper[5014]: I1006 21:45:55.903607 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-pkx2l" event={"ID":"ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414","Type":"ContainerDied","Data":"8ade8524a6d1c768be3672a8dfc8cdef4fe4774b03dc7ae25a0ab0d1c216c0fb"}
Oct 06 21:45:55 crc kubenswrapper[5014]: I1006 21:45:55.903644 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-pkx2l" event={"ID":"ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414","Type":"ContainerDied","Data":"20306ddedab5742fca3e9d7027eca30924a502872e713a614faf2800315c1204"}
Oct 06 21:45:55 crc kubenswrapper[5014]: I1006 21:45:55.903662 5014 scope.go:117] "RemoveContainer" containerID="8ade8524a6d1c768be3672a8dfc8cdef4fe4774b03dc7ae25a0ab0d1c216c0fb"
Oct 06 21:45:55 crc kubenswrapper[5014]: I1006 21:45:55.903673 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-pkx2l"
Oct 06 21:45:55 crc kubenswrapper[5014]: I1006 21:45:55.929990 5014 scope.go:117] "RemoveContainer" containerID="8ade8524a6d1c768be3672a8dfc8cdef4fe4774b03dc7ae25a0ab0d1c216c0fb"
Oct 06 21:45:55 crc kubenswrapper[5014]: E1006 21:45:55.930773 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ade8524a6d1c768be3672a8dfc8cdef4fe4774b03dc7ae25a0ab0d1c216c0fb\": container with ID starting with 8ade8524a6d1c768be3672a8dfc8cdef4fe4774b03dc7ae25a0ab0d1c216c0fb not found: ID does not exist" containerID="8ade8524a6d1c768be3672a8dfc8cdef4fe4774b03dc7ae25a0ab0d1c216c0fb"
Oct 06 21:45:55 crc kubenswrapper[5014]: I1006 21:45:55.930900 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ade8524a6d1c768be3672a8dfc8cdef4fe4774b03dc7ae25a0ab0d1c216c0fb"} err="failed to get container status \"8ade8524a6d1c768be3672a8dfc8cdef4fe4774b03dc7ae25a0ab0d1c216c0fb\": rpc error: code = NotFound desc = could not find container \"8ade8524a6d1c768be3672a8dfc8cdef4fe4774b03dc7ae25a0ab0d1c216c0fb\": container with ID starting with 8ade8524a6d1c768be3672a8dfc8cdef4fe4774b03dc7ae25a0ab0d1c216c0fb not found: ID does not exist"
Oct 06 21:45:55 crc kubenswrapper[5014]: I1006 21:45:55.940361 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-pkx2l"]
Oct 06 21:45:55 crc kubenswrapper[5014]: I1006 21:45:55.944489 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-pkx2l"]
Oct 06 21:45:56 crc kubenswrapper[5014]: I1006 21:45:56.119628 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-n4spq"]
Oct 06 21:45:56 crc kubenswrapper[5014]: E1006 21:45:56.119888 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414" containerName="registry-server"
Oct 06 21:45:56 crc kubenswrapper[5014]: I1006 21:45:56.119900 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414" containerName="registry-server"
Oct 06 21:45:56 crc kubenswrapper[5014]: I1006 21:45:56.120000 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414" containerName="registry-server"
Oct 06 21:45:56 crc kubenswrapper[5014]: I1006 21:45:56.120376 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-n4spq"
Oct 06 21:45:56 crc kubenswrapper[5014]: I1006 21:45:56.122727 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Oct 06 21:45:56 crc kubenswrapper[5014]: I1006 21:45:56.123342 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-czrsm"
Oct 06 21:45:56 crc kubenswrapper[5014]: I1006 21:45:56.123753 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Oct 06 21:45:56 crc kubenswrapper[5014]: I1006 21:45:56.133494 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-n4spq"]
Oct 06 21:45:56 crc kubenswrapper[5014]: I1006 21:45:56.238780 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psbld\" (UniqueName: \"kubernetes.io/projected/b24a58b8-c8d7-4d2b-95ea-fa25ae191b21-kube-api-access-psbld\") pod \"openstack-operator-index-n4spq\" (UID: \"b24a58b8-c8d7-4d2b-95ea-fa25ae191b21\") " pod="openstack-operators/openstack-operator-index-n4spq"
Oct 06 21:45:56 crc kubenswrapper[5014]: I1006 21:45:56.340365 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psbld\" (UniqueName: \"kubernetes.io/projected/b24a58b8-c8d7-4d2b-95ea-fa25ae191b21-kube-api-access-psbld\") pod \"openstack-operator-index-n4spq\" (UID: \"b24a58b8-c8d7-4d2b-95ea-fa25ae191b21\") " pod="openstack-operators/openstack-operator-index-n4spq"
Oct 06 21:45:56 crc kubenswrapper[5014]: I1006 21:45:56.373070 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psbld\" (UniqueName: \"kubernetes.io/projected/b24a58b8-c8d7-4d2b-95ea-fa25ae191b21-kube-api-access-psbld\") pod \"openstack-operator-index-n4spq\" (UID: \"b24a58b8-c8d7-4d2b-95ea-fa25ae191b21\") " pod="openstack-operators/openstack-operator-index-n4spq"
Oct 06 21:45:56 crc kubenswrapper[5014]: I1006 21:45:56.434432 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-n4spq"
Oct 06 21:45:56 crc kubenswrapper[5014]: I1006 21:45:56.744897 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-n4spq"]
Oct 06 21:45:56 crc kubenswrapper[5014]: I1006 21:45:56.913899 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-n4spq" event={"ID":"b24a58b8-c8d7-4d2b-95ea-fa25ae191b21","Type":"ContainerStarted","Data":"e88dea4dc5ef721730270c5702e0d86cc2b3012ee6aeabd8d0af797ea39afb67"}
Oct 06 21:45:57 crc kubenswrapper[5014]: I1006 21:45:57.496154 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414" path="/var/lib/kubelet/pods/ba0f04e0-1e63-44c7-b2ac-4c9ae15b1414/volumes"
Oct 06 21:45:57 crc kubenswrapper[5014]: I1006 21:45:57.920485 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-n4spq" event={"ID":"b24a58b8-c8d7-4d2b-95ea-fa25ae191b21","Type":"ContainerStarted","Data":"4509bc54e1a92e5da3f2d67fc62d2ffd7ae50391006e433f611940378b81838d"}
Oct 06 21:45:57 crc kubenswrapper[5014]: I1006 21:45:57.941323 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-n4spq" podStartSLOduration=1.507117215 podStartE2EDuration="1.941297296s" podCreationTimestamp="2025-10-06 21:45:56 +0000 UTC" firstStartedPulling="2025-10-06 21:45:56.749712523 +0000 UTC m=+902.042749287" lastFinishedPulling="2025-10-06 21:45:57.183892594 +0000 UTC m=+902.476929368" observedRunningTime="2025-10-06 21:45:57.937875778 +0000 UTC m=+903.230912562" watchObservedRunningTime="2025-10-06 21:45:57.941297296 +0000 UTC m=+903.234334070"
Oct 06 21:46:06 crc kubenswrapper[5014]: I1006 21:46:06.435663 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-n4spq"
Oct 06 21:46:06 crc kubenswrapper[5014]: I1006 21:46:06.436189 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-n4spq"
Oct 06 21:46:06 crc kubenswrapper[5014]: I1006 21:46:06.466966 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-n4spq"
Oct 06 21:46:07 crc kubenswrapper[5014]: I1006 21:46:07.035938 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-n4spq"
Oct 06 21:46:09 crc kubenswrapper[5014]: I1006 21:46:09.567862 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"]
Oct 06 21:46:09 crc kubenswrapper[5014]: I1006 21:46:09.569885 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"
Oct 06 21:46:09 crc kubenswrapper[5014]: I1006 21:46:09.573365 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-lwcz5"
Oct 06 21:46:09 crc kubenswrapper[5014]: I1006 21:46:09.587783 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"]
Oct 06 21:46:09 crc kubenswrapper[5014]: I1006 21:46:09.663934 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2d7d7f9e-7639-4f0c-be5c-567d0120316a-util\") pod \"039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr\" (UID: \"2d7d7f9e-7639-4f0c-be5c-567d0120316a\") " pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"
Oct 06 21:46:09 crc kubenswrapper[5014]: I1006 21:46:09.663985 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2d7d7f9e-7639-4f0c-be5c-567d0120316a-bundle\") pod \"039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr\" (UID: \"2d7d7f9e-7639-4f0c-be5c-567d0120316a\") " pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"
Oct 06 21:46:09 crc kubenswrapper[5014]: I1006 21:46:09.664037 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x27tp\" (UniqueName: \"kubernetes.io/projected/2d7d7f9e-7639-4f0c-be5c-567d0120316a-kube-api-access-x27tp\") pod \"039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr\" (UID: \"2d7d7f9e-7639-4f0c-be5c-567d0120316a\") " pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"
Oct 06 21:46:09 crc kubenswrapper[5014]: I1006 21:46:09.765478 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x27tp\" (UniqueName: \"kubernetes.io/projected/2d7d7f9e-7639-4f0c-be5c-567d0120316a-kube-api-access-x27tp\") pod \"039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr\" (UID: \"2d7d7f9e-7639-4f0c-be5c-567d0120316a\") " pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"
Oct 06 21:46:09 crc kubenswrapper[5014]: I1006 21:46:09.765868 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2d7d7f9e-7639-4f0c-be5c-567d0120316a-util\") pod \"039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr\" (UID: \"2d7d7f9e-7639-4f0c-be5c-567d0120316a\") " pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"
Oct 06 21:46:09 crc kubenswrapper[5014]: I1006 21:46:09.765988 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2d7d7f9e-7639-4f0c-be5c-567d0120316a-bundle\") pod \"039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr\" (UID: \"2d7d7f9e-7639-4f0c-be5c-567d0120316a\") " pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"
Oct 06 21:46:09 crc kubenswrapper[5014]: I1006 21:46:09.766488 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2d7d7f9e-7639-4f0c-be5c-567d0120316a-bundle\") pod \"039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr\" (UID: \"2d7d7f9e-7639-4f0c-be5c-567d0120316a\") " pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"
Oct 06 21:46:09 crc kubenswrapper[5014]: I1006 21:46:09.766549 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2d7d7f9e-7639-4f0c-be5c-567d0120316a-util\") pod \"039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr\" (UID: \"2d7d7f9e-7639-4f0c-be5c-567d0120316a\") " pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"
Oct 06 21:46:09 crc kubenswrapper[5014]: I1006 21:46:09.791261 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x27tp\" (UniqueName: \"kubernetes.io/projected/2d7d7f9e-7639-4f0c-be5c-567d0120316a-kube-api-access-x27tp\") pod \"039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr\" (UID: \"2d7d7f9e-7639-4f0c-be5c-567d0120316a\") " pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"
Oct 06 21:46:09 crc kubenswrapper[5014]: I1006 21:46:09.900267 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"
Oct 06 21:46:10 crc kubenswrapper[5014]: I1006 21:46:10.239059 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"]
Oct 06 21:46:11 crc kubenswrapper[5014]: I1006 21:46:11.031000 5014 generic.go:334] "Generic (PLEG): container finished" podID="2d7d7f9e-7639-4f0c-be5c-567d0120316a" containerID="ed7fddc82a3fefc825eb7d1d0ce27a9eb1976b390e3c7198cf550638cb3abf80" exitCode=0
Oct 06 21:46:11 crc kubenswrapper[5014]: I1006 21:46:11.031117 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr" event={"ID":"2d7d7f9e-7639-4f0c-be5c-567d0120316a","Type":"ContainerDied","Data":"ed7fddc82a3fefc825eb7d1d0ce27a9eb1976b390e3c7198cf550638cb3abf80"}
Oct 06 21:46:11 crc kubenswrapper[5014]: I1006 21:46:11.031533 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr" event={"ID":"2d7d7f9e-7639-4f0c-be5c-567d0120316a","Type":"ContainerStarted","Data":"75fad6a4917005cdc38ec57db284f00e02f1b375818bf7ad59232024baf538a6"}
Oct 06 21:46:12 crc kubenswrapper[5014]: I1006 21:46:12.046137 5014 generic.go:334] "Generic (PLEG): container finished" podID="2d7d7f9e-7639-4f0c-be5c-567d0120316a" containerID="0f08765881e59c88a3439bb390e6a21982b5b66a562ac92fd18974413bda6a52" exitCode=0
Oct 06 21:46:12 crc kubenswrapper[5014]: I1006 21:46:12.046205 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr" event={"ID":"2d7d7f9e-7639-4f0c-be5c-567d0120316a","Type":"ContainerDied","Data":"0f08765881e59c88a3439bb390e6a21982b5b66a562ac92fd18974413bda6a52"}
Oct 06 21:46:13 crc kubenswrapper[5014]: I1006 21:46:13.060129 5014 generic.go:334] "Generic (PLEG): container finished" podID="2d7d7f9e-7639-4f0c-be5c-567d0120316a" containerID="d31b2849d88376f97a476ec3030e5fc22fe315cc312c0f0da63abbe6aa16db92" exitCode=0
Oct 06 21:46:13 crc kubenswrapper[5014]: I1006 21:46:13.060247 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr" event={"ID":"2d7d7f9e-7639-4f0c-be5c-567d0120316a","Type":"ContainerDied","Data":"d31b2849d88376f97a476ec3030e5fc22fe315cc312c0f0da63abbe6aa16db92"}
Oct 06 21:46:14 crc kubenswrapper[5014]: I1006 21:46:14.400965 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"
Oct 06 21:46:14 crc kubenswrapper[5014]: I1006 21:46:14.443159 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2d7d7f9e-7639-4f0c-be5c-567d0120316a-util\") pod \"2d7d7f9e-7639-4f0c-be5c-567d0120316a\" (UID: \"2d7d7f9e-7639-4f0c-be5c-567d0120316a\") "
Oct 06 21:46:14 crc kubenswrapper[5014]: I1006 21:46:14.443321 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2d7d7f9e-7639-4f0c-be5c-567d0120316a-bundle\") pod \"2d7d7f9e-7639-4f0c-be5c-567d0120316a\" (UID: \"2d7d7f9e-7639-4f0c-be5c-567d0120316a\") "
Oct 06 21:46:14 crc kubenswrapper[5014]: I1006 21:46:14.443446 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x27tp\" (UniqueName: \"kubernetes.io/projected/2d7d7f9e-7639-4f0c-be5c-567d0120316a-kube-api-access-x27tp\") pod \"2d7d7f9e-7639-4f0c-be5c-567d0120316a\" (UID: \"2d7d7f9e-7639-4f0c-be5c-567d0120316a\") "
Oct 06 21:46:14 crc kubenswrapper[5014]: I1006 21:46:14.444566 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d7d7f9e-7639-4f0c-be5c-567d0120316a-bundle" (OuterVolumeSpecName: "bundle") pod "2d7d7f9e-7639-4f0c-be5c-567d0120316a" (UID: "2d7d7f9e-7639-4f0c-be5c-567d0120316a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:46:14 crc kubenswrapper[5014]: I1006 21:46:14.452379 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d7d7f9e-7639-4f0c-be5c-567d0120316a-kube-api-access-x27tp" (OuterVolumeSpecName: "kube-api-access-x27tp") pod "2d7d7f9e-7639-4f0c-be5c-567d0120316a" (UID: "2d7d7f9e-7639-4f0c-be5c-567d0120316a"). InnerVolumeSpecName "kube-api-access-x27tp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:46:14 crc kubenswrapper[5014]: I1006 21:46:14.473308 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d7d7f9e-7639-4f0c-be5c-567d0120316a-util" (OuterVolumeSpecName: "util") pod "2d7d7f9e-7639-4f0c-be5c-567d0120316a" (UID: "2d7d7f9e-7639-4f0c-be5c-567d0120316a"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:46:14 crc kubenswrapper[5014]: I1006 21:46:14.545402 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x27tp\" (UniqueName: \"kubernetes.io/projected/2d7d7f9e-7639-4f0c-be5c-567d0120316a-kube-api-access-x27tp\") on node \"crc\" DevicePath \"\""
Oct 06 21:46:14 crc kubenswrapper[5014]: I1006 21:46:14.545460 5014 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2d7d7f9e-7639-4f0c-be5c-567d0120316a-util\") on node \"crc\" DevicePath \"\""
Oct 06 21:46:14 crc kubenswrapper[5014]: I1006 21:46:14.545480 5014 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2d7d7f9e-7639-4f0c-be5c-567d0120316a-bundle\") on node \"crc\" DevicePath \"\""
Oct 06 21:46:15 crc kubenswrapper[5014]: I1006 21:46:15.078720 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr" event={"ID":"2d7d7f9e-7639-4f0c-be5c-567d0120316a","Type":"ContainerDied","Data":"75fad6a4917005cdc38ec57db284f00e02f1b375818bf7ad59232024baf538a6"}
Oct 06 21:46:15 crc kubenswrapper[5014]: I1006 21:46:15.078768 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75fad6a4917005cdc38ec57db284f00e02f1b375818bf7ad59232024baf538a6"
Oct 06 21:46:15 crc kubenswrapper[5014]: I1006 21:46:15.078804 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr"
Oct 06 21:46:17 crc kubenswrapper[5014]: I1006 21:46:17.621663 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-69f5b7986c-tgvqq"]
Oct 06 21:46:17 crc kubenswrapper[5014]: E1006 21:46:17.622436 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d7d7f9e-7639-4f0c-be5c-567d0120316a" containerName="extract"
Oct 06 21:46:17 crc kubenswrapper[5014]: I1006 21:46:17.622454 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d7d7f9e-7639-4f0c-be5c-567d0120316a" containerName="extract"
Oct 06 21:46:17 crc kubenswrapper[5014]: E1006 21:46:17.622663 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d7d7f9e-7639-4f0c-be5c-567d0120316a" containerName="pull"
Oct 06 21:46:17 crc kubenswrapper[5014]: I1006 21:46:17.622673 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d7d7f9e-7639-4f0c-be5c-567d0120316a" containerName="pull"
Oct 06 21:46:17 crc kubenswrapper[5014]: E1006 21:46:17.622702 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d7d7f9e-7639-4f0c-be5c-567d0120316a" containerName="util"
Oct 06 21:46:17 crc kubenswrapper[5014]: I1006 21:46:17.622709 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d7d7f9e-7639-4f0c-be5c-567d0120316a" containerName="util"
Oct 06 21:46:17 crc kubenswrapper[5014]: I1006 21:46:17.622845 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d7d7f9e-7639-4f0c-be5c-567d0120316a" containerName="extract"
Oct 06 21:46:17 crc kubenswrapper[5014]: I1006 21:46:17.623660 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-69f5b7986c-tgvqq"
Oct 06 21:46:17 crc kubenswrapper[5014]: I1006 21:46:17.626002 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-bw4nt"
Oct 06 21:46:17 crc kubenswrapper[5014]: I1006 21:46:17.648407 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-69f5b7986c-tgvqq"]
Oct 06 21:46:17 crc kubenswrapper[5014]: I1006 21:46:17.688081 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5qml\" (UniqueName: \"kubernetes.io/projected/b683d4c8-6991-42f2-bd7d-968080905450-kube-api-access-l5qml\") pod \"openstack-operator-controller-operator-69f5b7986c-tgvqq\" (UID: \"b683d4c8-6991-42f2-bd7d-968080905450\") " pod="openstack-operators/openstack-operator-controller-operator-69f5b7986c-tgvqq"
Oct 06 21:46:17 crc kubenswrapper[5014]: I1006 21:46:17.789784 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5qml\" (UniqueName: \"kubernetes.io/projected/b683d4c8-6991-42f2-bd7d-968080905450-kube-api-access-l5qml\") pod \"openstack-operator-controller-operator-69f5b7986c-tgvqq\" (UID: \"b683d4c8-6991-42f2-bd7d-968080905450\") " pod="openstack-operators/openstack-operator-controller-operator-69f5b7986c-tgvqq"
Oct 06 21:46:17 crc kubenswrapper[5014]: I1006 21:46:17.828469 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5qml\" (UniqueName: \"kubernetes.io/projected/b683d4c8-6991-42f2-bd7d-968080905450-kube-api-access-l5qml\") pod \"openstack-operator-controller-operator-69f5b7986c-tgvqq\" (UID: \"b683d4c8-6991-42f2-bd7d-968080905450\") " pod="openstack-operators/openstack-operator-controller-operator-69f5b7986c-tgvqq"
Oct 06 21:46:17 crc kubenswrapper[5014]: I1006 21:46:17.940833 5014 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-69f5b7986c-tgvqq" Oct 06 21:46:18 crc kubenswrapper[5014]: I1006 21:46:18.416712 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-69f5b7986c-tgvqq"] Oct 06 21:46:18 crc kubenswrapper[5014]: W1006 21:46:18.445558 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb683d4c8_6991_42f2_bd7d_968080905450.slice/crio-928a36bd72f960418aafe2ab032a84d72852f48233d4501f1c5dbcaed40013c0 WatchSource:0}: Error finding container 928a36bd72f960418aafe2ab032a84d72852f48233d4501f1c5dbcaed40013c0: Status 404 returned error can't find the container with id 928a36bd72f960418aafe2ab032a84d72852f48233d4501f1c5dbcaed40013c0 Oct 06 21:46:19 crc kubenswrapper[5014]: I1006 21:46:19.108339 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-69f5b7986c-tgvqq" event={"ID":"b683d4c8-6991-42f2-bd7d-968080905450","Type":"ContainerStarted","Data":"928a36bd72f960418aafe2ab032a84d72852f48233d4501f1c5dbcaed40013c0"} Oct 06 21:46:23 crc kubenswrapper[5014]: I1006 21:46:23.137682 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-69f5b7986c-tgvqq" event={"ID":"b683d4c8-6991-42f2-bd7d-968080905450","Type":"ContainerStarted","Data":"e5048ad551dacc0e4a09fa8efa776c7a13ec23278e4d457f08e8c04ce93aaacf"} Oct 06 21:46:25 crc kubenswrapper[5014]: I1006 21:46:25.159051 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-69f5b7986c-tgvqq" event={"ID":"b683d4c8-6991-42f2-bd7d-968080905450","Type":"ContainerStarted","Data":"6e6aa65f0043f08d77000b2974b48e79f9a51af1a7f256d3b0ce59ef1069a466"} Oct 06 21:46:25 crc kubenswrapper[5014]: I1006 21:46:25.159724 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-69f5b7986c-tgvqq" Oct 06 21:46:25 crc kubenswrapper[5014]: I1006 21:46:25.195354 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-69f5b7986c-tgvqq" podStartSLOduration=1.7945533359999999 podStartE2EDuration="8.195331142s" podCreationTimestamp="2025-10-06 21:46:17 +0000 UTC" firstStartedPulling="2025-10-06 21:46:18.451688053 +0000 UTC m=+923.744724787" lastFinishedPulling="2025-10-06 21:46:24.852465859 +0000 UTC m=+930.145502593" observedRunningTime="2025-10-06 21:46:25.189147116 +0000 UTC m=+930.482183890" watchObservedRunningTime="2025-10-06 21:46:25.195331142 +0000 UTC m=+930.488367916" Oct 06 21:46:27 crc kubenswrapper[5014]: I1006 21:46:27.178397 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-69f5b7986c-tgvqq" Oct 06 21:46:43 crc kubenswrapper[5014]: I1006 21:46:43.926240 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-64f56ff694-fbnjx"] Oct 06 21:46:43 crc kubenswrapper[5014]: I1006 21:46:43.927686 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-64f56ff694-fbnjx" Oct 06 21:46:43 crc kubenswrapper[5014]: I1006 21:46:43.930591 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-5jn4c" Oct 06 21:46:43 crc kubenswrapper[5014]: I1006 21:46:43.933033 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-84bd8f6848-wpcz6"] Oct 06 21:46:43 crc kubenswrapper[5014]: I1006 21:46:43.934260 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-84bd8f6848-wpcz6" Oct 06 21:46:43 crc kubenswrapper[5014]: I1006 21:46:43.937267 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-8plh4" Oct 06 21:46:43 crc kubenswrapper[5014]: I1006 21:46:43.964091 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7j46\" (UniqueName: \"kubernetes.io/projected/e03f73aa-fc56-435f-8f50-e55a813a4b0c-kube-api-access-c7j46\") pod \"barbican-operator-controller-manager-64f56ff694-fbnjx\" (UID: \"e03f73aa-fc56-435f-8f50-e55a813a4b0c\") " pod="openstack-operators/barbican-operator-controller-manager-64f56ff694-fbnjx" Oct 06 21:46:43 crc kubenswrapper[5014]: I1006 21:46:43.964177 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jlws\" (UniqueName: \"kubernetes.io/projected/89c6e73c-a5c8-4909-85e5-48118abefc95-kube-api-access-9jlws\") pod \"cinder-operator-controller-manager-84bd8f6848-wpcz6\" (UID: \"89c6e73c-a5c8-4909-85e5-48118abefc95\") " pod="openstack-operators/cinder-operator-controller-manager-84bd8f6848-wpcz6" Oct 06 21:46:43 crc kubenswrapper[5014]: I1006 21:46:43.968012 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-64f56ff694-fbnjx"] Oct 06 21:46:43 crc kubenswrapper[5014]: I1006 21:46:43.974320 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-58d86cd59d-8w8ws"] Oct 06 21:46:43 crc kubenswrapper[5014]: I1006 21:46:43.975364 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-58d86cd59d-8w8ws" Oct 06 21:46:43 crc kubenswrapper[5014]: I1006 21:46:43.979150 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-tbh6p" Oct 06 21:46:43 crc kubenswrapper[5014]: I1006 21:46:43.983401 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-84bd8f6848-wpcz6"] Oct 06 21:46:43 crc kubenswrapper[5014]: I1006 21:46:43.999443 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-58d86cd59d-8w8ws"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.012356 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-fd648f65-7qqn8"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.025096 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-7ccfc8cf49-9k728"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.025864 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-7ccfc8cf49-9k728" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.026264 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-fd648f65-7qqn8" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.028967 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-fmsxs" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.029217 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-jqzbw" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.039740 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-fd648f65-7qqn8"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.050677 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-7ccfc8cf49-9k728"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.065670 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjqv4\" (UniqueName: \"kubernetes.io/projected/89fc8a32-1a12-4d35-b7fe-2e1fa829c4b7-kube-api-access-kjqv4\") pod \"heat-operator-controller-manager-7ccfc8cf49-9k728\" (UID: \"89fc8a32-1a12-4d35-b7fe-2e1fa829c4b7\") " pod="openstack-operators/heat-operator-controller-manager-7ccfc8cf49-9k728" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.065733 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7j46\" (UniqueName: \"kubernetes.io/projected/e03f73aa-fc56-435f-8f50-e55a813a4b0c-kube-api-access-c7j46\") pod \"barbican-operator-controller-manager-64f56ff694-fbnjx\" (UID: \"e03f73aa-fc56-435f-8f50-e55a813a4b0c\") " pod="openstack-operators/barbican-operator-controller-manager-64f56ff694-fbnjx" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.065761 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9ktb\" (UniqueName: \"kubernetes.io/projected/3e1db7a5-54f5-4e2c-9edc-3a5f7ce19d9e-kube-api-access-c9ktb\") pod 
\"glance-operator-controller-manager-fd648f65-7qqn8\" (UID: \"3e1db7a5-54f5-4e2c-9edc-3a5f7ce19d9e\") " pod="openstack-operators/glance-operator-controller-manager-fd648f65-7qqn8" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.065826 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jlws\" (UniqueName: \"kubernetes.io/projected/89c6e73c-a5c8-4909-85e5-48118abefc95-kube-api-access-9jlws\") pod \"cinder-operator-controller-manager-84bd8f6848-wpcz6\" (UID: \"89c6e73c-a5c8-4909-85e5-48118abefc95\") " pod="openstack-operators/cinder-operator-controller-manager-84bd8f6848-wpcz6" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.065872 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mb226\" (UniqueName: \"kubernetes.io/projected/6aea271b-eb7b-4e06-8bbb-65807a8027b6-kube-api-access-mb226\") pod \"designate-operator-controller-manager-58d86cd59d-8w8ws\" (UID: \"6aea271b-eb7b-4e06-8bbb-65807a8027b6\") " pod="openstack-operators/designate-operator-controller-manager-58d86cd59d-8w8ws" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.070923 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5b477879bc-t5mpw"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.071846 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5b477879bc-t5mpw" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.073202 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-crtbm" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.074683 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.075709 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.077140 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-9ftrq" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.077211 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.088078 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7j46\" (UniqueName: \"kubernetes.io/projected/e03f73aa-fc56-435f-8f50-e55a813a4b0c-kube-api-access-c7j46\") pod \"barbican-operator-controller-manager-64f56ff694-fbnjx\" (UID: \"e03f73aa-fc56-435f-8f50-e55a813a4b0c\") " pod="openstack-operators/barbican-operator-controller-manager-64f56ff694-fbnjx" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.093416 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jlws\" (UniqueName: \"kubernetes.io/projected/89c6e73c-a5c8-4909-85e5-48118abefc95-kube-api-access-9jlws\") pod \"cinder-operator-controller-manager-84bd8f6848-wpcz6\" (UID: \"89c6e73c-a5c8-4909-85e5-48118abefc95\") " pod="openstack-operators/cinder-operator-controller-manager-84bd8f6848-wpcz6" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.112871 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5b477879bc-t5mpw"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.121685 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5467f8988c-qzk8x"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.122655 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5467f8988c-qzk8x" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.124930 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-wpxjr" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.134715 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.144576 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5b84cc7657-ml985"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.145664 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-5b84cc7657-ml985" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.152965 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-hh7l9" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.157258 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5467f8988c-qzk8x"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.167259 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqdlb\" (UniqueName: \"kubernetes.io/projected/df9f59e9-4a23-4204-be0f-5f2729f419b1-kube-api-access-bqdlb\") pod \"infra-operator-controller-manager-84788b6bc5-zbcqh\" (UID: \"df9f59e9-4a23-4204-be0f-5f2729f419b1\") " pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.167348 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ths6q\" (UniqueName: \"kubernetes.io/projected/e134fc2c-d3a1-41d2-bf11-257d15bc68c8-kube-api-access-ths6q\") pod \"horizon-operator-controller-manager-5b477879bc-t5mpw\" (UID: \"e134fc2c-d3a1-41d2-bf11-257d15bc68c8\") " pod="openstack-operators/horizon-operator-controller-manager-5b477879bc-t5mpw" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.167395 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mb226\" (UniqueName: \"kubernetes.io/projected/6aea271b-eb7b-4e06-8bbb-65807a8027b6-kube-api-access-mb226\") pod \"designate-operator-controller-manager-58d86cd59d-8w8ws\" (UID: \"6aea271b-eb7b-4e06-8bbb-65807a8027b6\") " pod="openstack-operators/designate-operator-controller-manager-58d86cd59d-8w8ws" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.167446 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjqv4\" (UniqueName: \"kubernetes.io/projected/89fc8a32-1a12-4d35-b7fe-2e1fa829c4b7-kube-api-access-kjqv4\") pod \"heat-operator-controller-manager-7ccfc8cf49-9k728\" (UID: \"89fc8a32-1a12-4d35-b7fe-2e1fa829c4b7\") " pod="openstack-operators/heat-operator-controller-manager-7ccfc8cf49-9k728" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.167480 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9ktb\" (UniqueName: \"kubernetes.io/projected/3e1db7a5-54f5-4e2c-9edc-3a5f7ce19d9e-kube-api-access-c9ktb\") pod \"glance-operator-controller-manager-fd648f65-7qqn8\" (UID: \"3e1db7a5-54f5-4e2c-9edc-3a5f7ce19d9e\") " pod="openstack-operators/glance-operator-controller-manager-fd648f65-7qqn8" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.167539 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7v9r\" (UniqueName: \"kubernetes.io/projected/df7496c8-460c-4f74-83a1-bb1eefeac13c-kube-api-access-k7v9r\") pod \"ironic-operator-controller-manager-5467f8988c-qzk8x\" (UID: \"df7496c8-460c-4f74-83a1-bb1eefeac13c\") " pod="openstack-operators/ironic-operator-controller-manager-5467f8988c-qzk8x" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.167589 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/df9f59e9-4a23-4204-be0f-5f2729f419b1-cert\") pod \"infra-operator-controller-manager-84788b6bc5-zbcqh\" (UID: \"df9f59e9-4a23-4204-be0f-5f2729f419b1\") " pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.167612 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5wq5\" (UniqueName: \"kubernetes.io/projected/1afbb4e8-7c9b-4521-9496-d0df6b2003bb-kube-api-access-h5wq5\") pod \"keystone-operator-controller-manager-5b84cc7657-ml985\" (UID: \"1afbb4e8-7c9b-4521-9496-d0df6b2003bb\") " pod="openstack-operators/keystone-operator-controller-manager-5b84cc7657-ml985" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.171658 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.181127 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.185657 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5b84cc7657-ml985"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.188212 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-sdshp" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.189691 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.192346 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjqv4\" (UniqueName: \"kubernetes.io/projected/89fc8a32-1a12-4d35-b7fe-2e1fa829c4b7-kube-api-access-kjqv4\") pod \"heat-operator-controller-manager-7ccfc8cf49-9k728\" (UID: \"89fc8a32-1a12-4d35-b7fe-2e1fa829c4b7\") " pod="openstack-operators/heat-operator-controller-manager-7ccfc8cf49-9k728" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.194093 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mb226\" (UniqueName: \"kubernetes.io/projected/6aea271b-eb7b-4e06-8bbb-65807a8027b6-kube-api-access-mb226\") pod \"designate-operator-controller-manager-58d86cd59d-8w8ws\" (UID: \"6aea271b-eb7b-4e06-8bbb-65807a8027b6\") " pod="openstack-operators/designate-operator-controller-manager-58d86cd59d-8w8ws" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.201789 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9ktb\" (UniqueName: \"kubernetes.io/projected/3e1db7a5-54f5-4e2c-9edc-3a5f7ce19d9e-kube-api-access-c9ktb\") pod \"glance-operator-controller-manager-fd648f65-7qqn8\" (UID: \"3e1db7a5-54f5-4e2c-9edc-3a5f7ce19d9e\") " pod="openstack-operators/glance-operator-controller-manager-fd648f65-7qqn8" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.209992 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-69b956fbf6-8fg8b"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.210915 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-69b956fbf6-8fg8b" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.213536 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-2bdmf" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.226684 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-d6c9dc5bc-jngd6"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.227716 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-d6c9dc5bc-jngd6" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.233911 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-ssg9c" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.236601 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-69b956fbf6-8fg8b"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.262468 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-d6c9dc5bc-jngd6"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.269211 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7v9r\" (UniqueName: \"kubernetes.io/projected/df7496c8-460c-4f74-83a1-bb1eefeac13c-kube-api-access-k7v9r\") pod \"ironic-operator-controller-manager-5467f8988c-qzk8x\" (UID: \"df7496c8-460c-4f74-83a1-bb1eefeac13c\") " pod="openstack-operators/ironic-operator-controller-manager-5467f8988c-qzk8x" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.269263 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcx65\" (UniqueName: \"kubernetes.io/projected/031743a5-fa78-4f1e-aaba-2b53f308a1b3-kube-api-access-jcx65\") pod \"mariadb-operator-controller-manager-d6c9dc5bc-jngd6\" (UID: \"031743a5-fa78-4f1e-aaba-2b53f308a1b3\") " pod="openstack-operators/mariadb-operator-controller-manager-d6c9dc5bc-jngd6" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.269287 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/df9f59e9-4a23-4204-be0f-5f2729f419b1-cert\") pod \"infra-operator-controller-manager-84788b6bc5-zbcqh\" (UID: \"df9f59e9-4a23-4204-be0f-5f2729f419b1\") " pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.269304 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5wq5\" (UniqueName: \"kubernetes.io/projected/1afbb4e8-7c9b-4521-9496-d0df6b2003bb-kube-api-access-h5wq5\") pod \"keystone-operator-controller-manager-5b84cc7657-ml985\" (UID: \"1afbb4e8-7c9b-4521-9496-d0df6b2003bb\") " pod="openstack-operators/keystone-operator-controller-manager-5b84cc7657-ml985" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.269325 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqdlb\" (UniqueName: \"kubernetes.io/projected/df9f59e9-4a23-4204-be0f-5f2729f419b1-kube-api-access-bqdlb\") pod \"infra-operator-controller-manager-84788b6bc5-zbcqh\" (UID: \"df9f59e9-4a23-4204-be0f-5f2729f419b1\") " 
pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.269347 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttwjx\" (UniqueName: \"kubernetes.io/projected/bade47f3-c50b-4f05-acfb-192e11c5b9e6-kube-api-access-ttwjx\") pod \"manila-operator-controller-manager-7cb48dbc-5b2mb\" (UID: \"bade47f3-c50b-4f05-acfb-192e11c5b9e6\") " pod="openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.269367 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ths6q\" (UniqueName: \"kubernetes.io/projected/e134fc2c-d3a1-41d2-bf11-257d15bc68c8-kube-api-access-ths6q\") pod \"horizon-operator-controller-manager-5b477879bc-t5mpw\" (UID: \"e134fc2c-d3a1-41d2-bf11-257d15bc68c8\") " pod="openstack-operators/horizon-operator-controller-manager-5b477879bc-t5mpw" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.269426 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tr6v9\" (UniqueName: \"kubernetes.io/projected/bea4afa0-51fb-4d29-84ba-807b4adc79cd-kube-api-access-tr6v9\") pod \"neutron-operator-controller-manager-69b956fbf6-8fg8b\" (UID: \"bea4afa0-51fb-4d29-84ba-807b4adc79cd\") " pod="openstack-operators/neutron-operator-controller-manager-69b956fbf6-8fg8b" Oct 06 21:46:44 crc kubenswrapper[5014]: E1006 21:46:44.269820 5014 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Oct 06 21:46:44 crc kubenswrapper[5014]: E1006 21:46:44.269862 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/df9f59e9-4a23-4204-be0f-5f2729f419b1-cert podName:df9f59e9-4a23-4204-be0f-5f2729f419b1 nodeName:}" failed. No retries permitted until 2025-10-06 21:46:44.769847132 +0000 UTC m=+950.062883866 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/df9f59e9-4a23-4204-be0f-5f2729f419b1-cert") pod "infra-operator-controller-manager-84788b6bc5-zbcqh" (UID: "df9f59e9-4a23-4204-be0f-5f2729f419b1") : secret "infra-operator-webhook-server-cert" not found Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.272905 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-64f56ff694-fbnjx" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.279448 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-84bd8f6848-wpcz6" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.289110 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.290428 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.292480 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-58d86cd59d-8w8ws" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.292867 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqdlb\" (UniqueName: \"kubernetes.io/projected/df9f59e9-4a23-4204-be0f-5f2729f419b1-kube-api-access-bqdlb\") pod \"infra-operator-controller-manager-84788b6bc5-zbcqh\" (UID: \"df9f59e9-4a23-4204-be0f-5f2729f419b1\") " pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.293086 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-wdnpl" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.295978 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.303048 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ths6q\" (UniqueName: \"kubernetes.io/projected/e134fc2c-d3a1-41d2-bf11-257d15bc68c8-kube-api-access-ths6q\") pod \"horizon-operator-controller-manager-5b477879bc-t5mpw\" (UID: \"e134fc2c-d3a1-41d2-bf11-257d15bc68c8\") " pod="openstack-operators/horizon-operator-controller-manager-5b477879bc-t5mpw" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.303122 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.304145 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.304988 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7v9r\" (UniqueName: \"kubernetes.io/projected/df7496c8-460c-4f74-83a1-bb1eefeac13c-kube-api-access-k7v9r\") pod \"ironic-operator-controller-manager-5467f8988c-qzk8x\" (UID: \"df7496c8-460c-4f74-83a1-bb1eefeac13c\") " pod="openstack-operators/ironic-operator-controller-manager-5467f8988c-qzk8x" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.306221 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5wq5\" (UniqueName: \"kubernetes.io/projected/1afbb4e8-7c9b-4521-9496-d0df6b2003bb-kube-api-access-h5wq5\") pod \"keystone-operator-controller-manager-5b84cc7657-ml985\" (UID: \"1afbb4e8-7c9b-4521-9496-d0df6b2003bb\") " pod="openstack-operators/keystone-operator-controller-manager-5b84cc7657-ml985" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.307117 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-6sdxw" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.324023 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.332839 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54d485fd9-mq79t"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.333898 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-54d485fd9-mq79t" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.340482 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-mp6pf" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.344492 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.351948 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-7ccfc8cf49-9k728" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.358153 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.360548 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.360787 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-k2grv" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.365769 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54d485fd9-mq79t"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.366957 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-fd648f65-7qqn8" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.370505 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sr4ks\" (UniqueName: \"kubernetes.io/projected/b46c946e-8936-434c-8a13-0670857929d4-kube-api-access-sr4ks\") pod \"octavia-operator-controller-manager-69f59f9d8-s6hxr\" (UID: \"b46c946e-8936-434c-8a13-0670857929d4\") " pod="openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.370548 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tr6v9\" (UniqueName: \"kubernetes.io/projected/bea4afa0-51fb-4d29-84ba-807b4adc79cd-kube-api-access-tr6v9\") pod \"neutron-operator-controller-manager-69b956fbf6-8fg8b\" (UID: \"bea4afa0-51fb-4d29-84ba-807b4adc79cd\") " pod="openstack-operators/neutron-operator-controller-manager-69b956fbf6-8fg8b" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.370572 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kh5jz\" (UniqueName: \"kubernetes.io/projected/c4de151c-737e-4ff2-8445-571ec2a5a8cd-kube-api-access-kh5jz\") pod \"openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd\" (UID: \"c4de151c-737e-4ff2-8445-571ec2a5a8cd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.370597 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6769b\" (UniqueName: \"kubernetes.io/projected/840c119a-0407-45bd-9588-795a38db80a8-kube-api-access-6769b\") pod 
\"ovn-operator-controller-manager-54d485fd9-mq79t\" (UID: \"840c119a-0407-45bd-9588-795a38db80a8\") " pod="openstack-operators/ovn-operator-controller-manager-54d485fd9-mq79t" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.370647 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcx65\" (UniqueName: \"kubernetes.io/projected/031743a5-fa78-4f1e-aaba-2b53f308a1b3-kube-api-access-jcx65\") pod \"mariadb-operator-controller-manager-d6c9dc5bc-jngd6\" (UID: \"031743a5-fa78-4f1e-aaba-2b53f308a1b3\") " pod="openstack-operators/mariadb-operator-controller-manager-d6c9dc5bc-jngd6" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.370691 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttwjx\" (UniqueName: \"kubernetes.io/projected/bade47f3-c50b-4f05-acfb-192e11c5b9e6-kube-api-access-ttwjx\") pod \"manila-operator-controller-manager-7cb48dbc-5b2mb\" (UID: \"bade47f3-c50b-4f05-acfb-192e11c5b9e6\") " pod="openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.370724 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c4de151c-737e-4ff2-8445-571ec2a5a8cd-cert\") pod \"openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd\" (UID: \"c4de151c-737e-4ff2-8445-571ec2a5a8cd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.370747 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fp6rr\" (UniqueName: \"kubernetes.io/projected/b5c83451-721d-4897-b4b9-996ee0d7ae94-kube-api-access-fp6rr\") pod \"nova-operator-controller-manager-6c9b57c67-hqrxh\" (UID: \"b5c83451-721d-4897-b4b9-996ee0d7ae94\") " pod="openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.389517 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttwjx\" (UniqueName: \"kubernetes.io/projected/bade47f3-c50b-4f05-acfb-192e11c5b9e6-kube-api-access-ttwjx\") pod \"manila-operator-controller-manager-7cb48dbc-5b2mb\" (UID: \"bade47f3-c50b-4f05-acfb-192e11c5b9e6\") " pod="openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.390494 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tr6v9\" (UniqueName: \"kubernetes.io/projected/bea4afa0-51fb-4d29-84ba-807b4adc79cd-kube-api-access-tr6v9\") pod \"neutron-operator-controller-manager-69b956fbf6-8fg8b\" (UID: \"bea4afa0-51fb-4d29-84ba-807b4adc79cd\") " pod="openstack-operators/neutron-operator-controller-manager-69b956fbf6-8fg8b" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.393238 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcx65\" (UniqueName: \"kubernetes.io/projected/031743a5-fa78-4f1e-aaba-2b53f308a1b3-kube-api-access-jcx65\") pod \"mariadb-operator-controller-manager-d6c9dc5bc-jngd6\" (UID: \"031743a5-fa78-4f1e-aaba-2b53f308a1b3\") " pod="openstack-operators/mariadb-operator-controller-manager-d6c9dc5bc-jngd6" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.395645 5014 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.397812 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.402652 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-nfprv" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.404662 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.415832 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5b477879bc-t5mpw" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.425063 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.439553 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.442300 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.447426 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5467f8988c-qzk8x" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.448152 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-cwsk8" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.450343 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.468791 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-f589c7597-scb7g"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.470297 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-f589c7597-scb7g" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.471730 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-f589c7597-scb7g"] Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.472660 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sr4ks\" (UniqueName: \"kubernetes.io/projected/b46c946e-8936-434c-8a13-0670857929d4-kube-api-access-sr4ks\") pod \"octavia-operator-controller-manager-69f59f9d8-s6hxr\" (UID: \"b46c946e-8936-434c-8a13-0670857929d4\") " pod="openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.472713 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kh5jz\" (UniqueName: \"kubernetes.io/projected/c4de151c-737e-4ff2-8445-571ec2a5a8cd-kube-api-access-kh5jz\") pod \"openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd\" (UID: \"c4de151c-737e-4ff2-8445-571ec2a5a8cd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.472744 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6769b\" (UniqueName: \"kubernetes.io/projected/840c119a-0407-45bd-9588-795a38db80a8-kube-api-access-6769b\") pod \"ovn-operator-controller-manager-54d485fd9-mq79t\" (UID: \"840c119a-0407-45bd-9588-795a38db80a8\") " pod="openstack-operators/ovn-operator-controller-manager-54d485fd9-mq79t" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.472798 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-5b84cc7657-ml985" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.472918 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmvgq\" (UniqueName: \"kubernetes.io/projected/41b602d9-b505-4656-8dfa-1443404db0c1-kube-api-access-xmvgq\") pod \"placement-operator-controller-manager-66f6d6849b-kwbrs\" (UID: \"41b602d9-b505-4656-8dfa-1443404db0c1\") " pod="openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.472952 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c4de151c-737e-4ff2-8445-571ec2a5a8cd-cert\") pod \"openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd\" (UID: \"c4de151c-737e-4ff2-8445-571ec2a5a8cd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.472972 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vthbq\" (UniqueName: \"kubernetes.io/projected/b73339a9-4a16-41e5-8739-4822f0199db4-kube-api-access-vthbq\") pod \"swift-operator-controller-manager-76d5577b-w8ds6\" (UID: \"b73339a9-4a16-41e5-8739-4822f0199db4\") " pod="openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.472992 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp6rr\" (UniqueName: \"kubernetes.io/projected/b5c83451-721d-4897-b4b9-996ee0d7ae94-kube-api-access-fp6rr\") pod \"nova-operator-controller-manager-6c9b57c67-hqrxh\" (UID: \"b5c83451-721d-4897-b4b9-996ee0d7ae94\") " pod="openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh" Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.473282 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-p6f48" Oct 06 21:46:44 crc kubenswrapper[5014]: E1006 21:46:44.473474 5014 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Oct 06 21:46:44 crc kubenswrapper[5014]: E1006 21:46:44.476772 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c4de151c-737e-4ff2-8445-571ec2a5a8cd-cert podName:c4de151c-737e-4ff2-8445-571ec2a5a8cd nodeName:}" failed. No retries permitted until 2025-10-06 21:46:44.976736078 +0000 UTC m=+950.269772812 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c4de151c-737e-4ff2-8445-571ec2a5a8cd-cert") pod "openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd" (UID: "c4de151c-737e-4ff2-8445-571ec2a5a8cd") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.491518 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sr4ks\" (UniqueName: \"kubernetes.io/projected/b46c946e-8936-434c-8a13-0670857929d4-kube-api-access-sr4ks\") pod \"octavia-operator-controller-manager-69f59f9d8-s6hxr\" (UID: \"b46c946e-8936-434c-8a13-0670857929d4\") " pod="openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.496663 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-6bb6dcddc-v5l5b"]
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.497797 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-6bb6dcddc-v5l5b"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.500823 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-dl2wc"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.511855 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-6bb6dcddc-v5l5b"]
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.514340 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kh5jz\" (UniqueName: \"kubernetes.io/projected/c4de151c-737e-4ff2-8445-571ec2a5a8cd-kube-api-access-kh5jz\") pod \"openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd\" (UID: \"c4de151c-737e-4ff2-8445-571ec2a5a8cd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.516475 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp6rr\" (UniqueName: \"kubernetes.io/projected/b5c83451-721d-4897-b4b9-996ee0d7ae94-kube-api-access-fp6rr\") pod \"nova-operator-controller-manager-6c9b57c67-hqrxh\" (UID: \"b5c83451-721d-4897-b4b9-996ee0d7ae94\") " pod="openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.519029 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6769b\" (UniqueName: \"kubernetes.io/projected/840c119a-0407-45bd-9588-795a38db80a8-kube-api-access-6769b\") pod \"ovn-operator-controller-manager-54d485fd9-mq79t\" (UID: \"840c119a-0407-45bd-9588-795a38db80a8\") " pod="openstack-operators/ovn-operator-controller-manager-54d485fd9-mq79t"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.532661 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb"]
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.533932 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.536323 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-n2dmg"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.547506 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb"]
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.557072 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.568840 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-69b956fbf6-8fg8b"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.575575 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kq5p2\" (UniqueName: \"kubernetes.io/projected/17d7e65a-c72c-4d26-81e3-8374778c5c3b-kube-api-access-kq5p2\") pod \"telemetry-operator-controller-manager-f589c7597-scb7g\" (UID: \"17d7e65a-c72c-4d26-81e3-8374778c5c3b\") " pod="openstack-operators/telemetry-operator-controller-manager-f589c7597-scb7g"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.575637 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvm6j\" (UniqueName: \"kubernetes.io/projected/bd4146d3-3013-497c-85c1-37d5c7ea2e7c-kube-api-access-bvm6j\") pod \"test-operator-controller-manager-6bb6dcddc-v5l5b\" (UID: \"bd4146d3-3013-497c-85c1-37d5c7ea2e7c\") " pod="openstack-operators/test-operator-controller-manager-6bb6dcddc-v5l5b"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.575722 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmvgq\" (UniqueName: \"kubernetes.io/projected/41b602d9-b505-4656-8dfa-1443404db0c1-kube-api-access-xmvgq\") pod \"placement-operator-controller-manager-66f6d6849b-kwbrs\" (UID: \"41b602d9-b505-4656-8dfa-1443404db0c1\") " pod="openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.575764 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vthbq\" (UniqueName: \"kubernetes.io/projected/b73339a9-4a16-41e5-8739-4822f0199db4-kube-api-access-vthbq\") pod \"swift-operator-controller-manager-76d5577b-w8ds6\" (UID: \"b73339a9-4a16-41e5-8739-4822f0199db4\") " pod="openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.575807 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gmx8\" (UniqueName: \"kubernetes.io/projected/6c35d36c-16ac-4ba1-9166-32c9a56ba6a0-kube-api-access-4gmx8\") pod \"watcher-operator-controller-manager-5d98cc5575-md7kb\" (UID: \"6c35d36c-16ac-4ba1-9166-32c9a56ba6a0\") " pod="openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.590088 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-d6c9dc5bc-jngd6"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.627677 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vthbq\" (UniqueName: \"kubernetes.io/projected/b73339a9-4a16-41e5-8739-4822f0199db4-kube-api-access-vthbq\") pod \"swift-operator-controller-manager-76d5577b-w8ds6\" (UID: \"b73339a9-4a16-41e5-8739-4822f0199db4\") " pod="openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.628867 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmvgq\" (UniqueName: \"kubernetes.io/projected/41b602d9-b505-4656-8dfa-1443404db0c1-kube-api-access-xmvgq\") pod \"placement-operator-controller-manager-66f6d6849b-kwbrs\" (UID: \"41b602d9-b505-4656-8dfa-1443404db0c1\") " pod="openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.647330 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-579544b768-thzrl"]
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.649177 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-579544b768-thzrl"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.651060 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.651380 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-jpvjp"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.665942 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.666143 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-579544b768-thzrl"]
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.677509 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc-cert\") pod \"openstack-operator-controller-manager-579544b768-thzrl\" (UID: \"d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc\") " pod="openstack-operators/openstack-operator-controller-manager-579544b768-thzrl"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.677570 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gmx8\" (UniqueName: \"kubernetes.io/projected/6c35d36c-16ac-4ba1-9166-32c9a56ba6a0-kube-api-access-4gmx8\") pod \"watcher-operator-controller-manager-5d98cc5575-md7kb\" (UID: \"6c35d36c-16ac-4ba1-9166-32c9a56ba6a0\") " pod="openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.677611 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kq5p2\" (UniqueName: \"kubernetes.io/projected/17d7e65a-c72c-4d26-81e3-8374778c5c3b-kube-api-access-kq5p2\") pod \"telemetry-operator-controller-manager-f589c7597-scb7g\" (UID: \"17d7e65a-c72c-4d26-81e3-8374778c5c3b\") " pod="openstack-operators/telemetry-operator-controller-manager-f589c7597-scb7g"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.677642 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvm6j\" (UniqueName: \"kubernetes.io/projected/bd4146d3-3013-497c-85c1-37d5c7ea2e7c-kube-api-access-bvm6j\") pod \"test-operator-controller-manager-6bb6dcddc-v5l5b\" (UID: \"bd4146d3-3013-497c-85c1-37d5c7ea2e7c\") " pod="openstack-operators/test-operator-controller-manager-6bb6dcddc-v5l5b"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.677669 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dsc5\" (UniqueName: \"kubernetes.io/projected/d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc-kube-api-access-5dsc5\") pod \"openstack-operator-controller-manager-579544b768-thzrl\" (UID: \"d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc\") " pod="openstack-operators/openstack-operator-controller-manager-579544b768-thzrl"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.678276 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.690167 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-54d485fd9-mq79t"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.701609 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvm6j\" (UniqueName: \"kubernetes.io/projected/bd4146d3-3013-497c-85c1-37d5c7ea2e7c-kube-api-access-bvm6j\") pod \"test-operator-controller-manager-6bb6dcddc-v5l5b\" (UID: \"bd4146d3-3013-497c-85c1-37d5c7ea2e7c\") " pod="openstack-operators/test-operator-controller-manager-6bb6dcddc-v5l5b"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.703540 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gmx8\" (UniqueName: \"kubernetes.io/projected/6c35d36c-16ac-4ba1-9166-32c9a56ba6a0-kube-api-access-4gmx8\") pod \"watcher-operator-controller-manager-5d98cc5575-md7kb\" (UID: \"6c35d36c-16ac-4ba1-9166-32c9a56ba6a0\") " pod="openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.716000 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kq5p2\" (UniqueName: \"kubernetes.io/projected/17d7e65a-c72c-4d26-81e3-8374778c5c3b-kube-api-access-kq5p2\") pod \"telemetry-operator-controller-manager-f589c7597-scb7g\" (UID: \"17d7e65a-c72c-4d26-81e3-8374778c5c3b\") " pod="openstack-operators/telemetry-operator-controller-manager-f589c7597-scb7g"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.727406 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr"]
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.734522 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.738988 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-hdktb"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.741252 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.741741 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr"]
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.780808 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc-cert\") pod \"openstack-operator-controller-manager-579544b768-thzrl\" (UID: \"d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc\") " pod="openstack-operators/openstack-operator-controller-manager-579544b768-thzrl"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.780895 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dsc5\" (UniqueName: \"kubernetes.io/projected/d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc-kube-api-access-5dsc5\") pod \"openstack-operator-controller-manager-579544b768-thzrl\" (UID: \"d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc\") " pod="openstack-operators/openstack-operator-controller-manager-579544b768-thzrl"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.780924 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/df9f59e9-4a23-4204-be0f-5f2729f419b1-cert\") pod \"infra-operator-controller-manager-84788b6bc5-zbcqh\" (UID: \"df9f59e9-4a23-4204-be0f-5f2729f419b1\") " pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh"
Oct 06 21:46:44 crc kubenswrapper[5014]: E1006 21:46:44.784197 5014 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Oct 06 21:46:44 crc kubenswrapper[5014]: E1006 21:46:44.784273 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc-cert podName:d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc nodeName:}" failed. No retries permitted until 2025-10-06 21:46:45.28425537 +0000 UTC m=+950.577292104 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc-cert") pod "openstack-operator-controller-manager-579544b768-thzrl" (UID: "d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc") : secret "webhook-server-cert" not found
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.793067 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/df9f59e9-4a23-4204-be0f-5f2729f419b1-cert\") pod \"infra-operator-controller-manager-84788b6bc5-zbcqh\" (UID: \"df9f59e9-4a23-4204-be0f-5f2729f419b1\") " pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.802462 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5dsc5\" (UniqueName: \"kubernetes.io/projected/d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc-kube-api-access-5dsc5\") pod \"openstack-operator-controller-manager-579544b768-thzrl\" (UID: \"d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc\") " pod="openstack-operators/openstack-operator-controller-manager-579544b768-thzrl"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.803595 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.828059 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.844536 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-f589c7597-scb7g"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.876921 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-6bb6dcddc-v5l5b"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.883537 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgsgh\" (UniqueName: \"kubernetes.io/projected/c1d9fb64-549f-4b44-bed2-b49d474beb39-kube-api-access-mgsgh\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr\" (UID: \"c1d9fb64-549f-4b44-bed2-b49d474beb39\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.985145 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgsgh\" (UniqueName: \"kubernetes.io/projected/c1d9fb64-549f-4b44-bed2-b49d474beb39-kube-api-access-mgsgh\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr\" (UID: \"c1d9fb64-549f-4b44-bed2-b49d474beb39\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr"
Oct 06 21:46:44 crc kubenswrapper[5014]: I1006 21:46:44.985259 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c4de151c-737e-4ff2-8445-571ec2a5a8cd-cert\") pod \"openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd\" (UID: \"c4de151c-737e-4ff2-8445-571ec2a5a8cd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd"
Oct 06 21:46:44 crc kubenswrapper[5014]: E1006 21:46:44.986026 5014 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Oct 06 21:46:44 crc kubenswrapper[5014]: E1006 21:46:44.986066 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c4de151c-737e-4ff2-8445-571ec2a5a8cd-cert podName:c4de151c-737e-4ff2-8445-571ec2a5a8cd nodeName:}" failed. No retries permitted until 2025-10-06 21:46:45.986052886 +0000 UTC m=+951.279089620 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c4de151c-737e-4ff2-8445-571ec2a5a8cd-cert") pod "openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd" (UID: "c4de151c-737e-4ff2-8445-571ec2a5a8cd") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Oct 06 21:46:45 crc kubenswrapper[5014]: I1006 21:46:45.011298 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgsgh\" (UniqueName: \"kubernetes.io/projected/c1d9fb64-549f-4b44-bed2-b49d474beb39-kube-api-access-mgsgh\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr\" (UID: \"c1d9fb64-549f-4b44-bed2-b49d474beb39\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr"
Oct 06 21:46:45 crc kubenswrapper[5014]: I1006 21:46:45.021407 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh"
Oct 06 21:46:45 crc kubenswrapper[5014]: I1006 21:46:45.169730 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr"
Oct 06 21:46:45 crc kubenswrapper[5014]: I1006 21:46:45.289018 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc-cert\") pod \"openstack-operator-controller-manager-579544b768-thzrl\" (UID: \"d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc\") " pod="openstack-operators/openstack-operator-controller-manager-579544b768-thzrl"
Oct 06 21:46:45 crc kubenswrapper[5014]: I1006 21:46:45.303894 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc-cert\") pod \"openstack-operator-controller-manager-579544b768-thzrl\" (UID: \"d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc\") " pod="openstack-operators/openstack-operator-controller-manager-579544b768-thzrl"
Oct 06 21:46:45 crc kubenswrapper[5014]: I1006 21:46:45.456086 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-579544b768-thzrl"
Oct 06 21:46:45 crc kubenswrapper[5014]: I1006 21:46:45.937538 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-7ccfc8cf49-9k728"]
Oct 06 21:46:45 crc kubenswrapper[5014]: I1006 21:46:45.950722 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5b477879bc-t5mpw"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.011080 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c4de151c-737e-4ff2-8445-571ec2a5a8cd-cert\") pod \"openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd\" (UID: \"c4de151c-737e-4ff2-8445-571ec2a5a8cd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd"
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.020233 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c4de151c-737e-4ff2-8445-571ec2a5a8cd-cert\") pod \"openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd\" (UID: \"c4de151c-737e-4ff2-8445-571ec2a5a8cd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd"
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.078640 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5b84cc7657-ml985"]
Oct 06 21:46:46 crc kubenswrapper[5014]: W1006 21:46:46.084018 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1afbb4e8_7c9b_4521_9496_d0df6b2003bb.slice/crio-398e1d70509986727c62a60eed5e136d6c56727a04637a57abd2432297959e25 WatchSource:0}: Error finding container 398e1d70509986727c62a60eed5e136d6c56727a04637a57abd2432297959e25: Status 404 returned error can't find the container with id 398e1d70509986727c62a60eed5e136d6c56727a04637a57abd2432297959e25
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.086045 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-58d86cd59d-8w8ws"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.095393 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-fd648f65-7qqn8"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.117187 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-64f56ff694-fbnjx"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.137136 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54d485fd9-mq79t"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.157326 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-84bd8f6848-wpcz6"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.170919 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5467f8988c-qzk8x"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.198763 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd"
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.336594 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-7ccfc8cf49-9k728" event={"ID":"89fc8a32-1a12-4d35-b7fe-2e1fa829c4b7","Type":"ContainerStarted","Data":"61c3a5b874a4a3bd87397adb8c364d3a01f7dd7db9d932ccbc35339846963db6"}
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.337917 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-fd648f65-7qqn8" event={"ID":"3e1db7a5-54f5-4e2c-9edc-3a5f7ce19d9e","Type":"ContainerStarted","Data":"c4e77b45a217732fd2236f383070080e4c21408eb2df54a71cacb83925a484d7"}
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.340785 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5b477879bc-t5mpw" event={"ID":"e134fc2c-d3a1-41d2-bf11-257d15bc68c8","Type":"ContainerStarted","Data":"108fed741067659ad539bd52b6fbde38448f675bef92fa73a3cf03b055413611"}
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.342327 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5b84cc7657-ml985" event={"ID":"1afbb4e8-7c9b-4521-9496-d0df6b2003bb","Type":"ContainerStarted","Data":"398e1d70509986727c62a60eed5e136d6c56727a04637a57abd2432297959e25"}
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.349137 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-58d86cd59d-8w8ws" event={"ID":"6aea271b-eb7b-4e06-8bbb-65807a8027b6","Type":"ContainerStarted","Data":"fbdbc80c7f0bab9ca2cd667587a67d42069eb726a902114423d1a0ba69cc4d79"}
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.354890 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-64f56ff694-fbnjx" event={"ID":"e03f73aa-fc56-435f-8f50-e55a813a4b0c","Type":"ContainerStarted","Data":"33efbbe63c9a5b80da4737d93700912b0e40569fc64671968512143718ce9fa3"}
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.360561 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-84bd8f6848-wpcz6" event={"ID":"89c6e73c-a5c8-4909-85e5-48118abefc95","Type":"ContainerStarted","Data":"581595a673a6ef13cc7c7506a355b243b6f35ad74a334316f33dc066249acbb1"}
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.362537 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54d485fd9-mq79t" event={"ID":"840c119a-0407-45bd-9588-795a38db80a8","Type":"ContainerStarted","Data":"451e33ded8e321dec1c5972e13298828e89539abda576925a006af74e462f408"}
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.367284 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5467f8988c-qzk8x" event={"ID":"df7496c8-460c-4f74-83a1-bb1eefeac13c","Type":"ContainerStarted","Data":"4ea5286bc772b6be0f3ae0c73907babae41f2b3f42df5ae9c47750fc8dba5bd8"}
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.389160 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-69b956fbf6-8fg8b"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.433231 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-6bb6dcddc-v5l5b"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.445212 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-d6c9dc5bc-jngd6"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.455054 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.481119 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.486717 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.510981 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.524671 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-579544b768-thzrl"]
Oct 06 21:46:46 crc kubenswrapper[5014]: E1006 21:46:46.527399 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:e4c4ff39c54c0af231fb781759ab50ed86285c74d38bdea43fa75646b762d842,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fp6rr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-6c9b57c67-hqrxh_openstack-operators(b5c83451-721d-4897-b4b9-996ee0d7ae94): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Oct 06 21:46:46 crc kubenswrapper[5014]: E1006 21:46:46.545392 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:da5c3078d80878d66c616e6f8a0bb909f95d971cde2c612f96fded064113e182,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sr4ks,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-69f59f9d8-s6hxr_openstack-operators(b46c946e-8936-434c-8a13-0670857929d4): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.546247 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-f589c7597-scb7g"]
Oct 06 21:46:46 crc kubenswrapper[5014]: E1006 21:46:46.551419 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:b6cef68bfaacdf992a9fa1a6b03a848a48c18cbb6ed12d95561b4b37d858b99f,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bqdlb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-84788b6bc5-zbcqh_openstack-operators(df9f59e9-4a23-4204-be0f-5f2729f419b1): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.561186 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr"]
Oct 06 21:46:46 crc kubenswrapper[5014]: E1006 21:46:46.563554 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:adc23c5fd1aece2b16dc8e22ceed628f9a719455e39d3f98c77544665c6749e1,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xmvgq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-66f6d6849b-kwbrs_openstack-operators(41b602d9-b505-4656-8dfa-1443404db0c1): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.582653 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.606511 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb"]
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.610682 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb"]
Oct 06 21:46:46 crc kubenswrapper[5014]: E1006 21:46:46.612571 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:64f57b2b59dea2bd9fae91490c5bec2687131884a049e6579819d9f951b877c6,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4gmx8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-5d98cc5575-md7kb_openstack-operators(6c35d36c-16ac-4ba1-9166-32c9a56ba6a0): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Oct 06 21:46:46 crc kubenswrapper[5014]: E1006 21:46:46.746155 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:637bb7b9ac308bc1e323391a3593b824f688090a856c83385814c17a571b1eed,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vthbq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-76d5577b-w8ds6_openstack-operators(b73339a9-4a16-41e5-8739-4822f0199db4): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Oct 06 21:46:46 crc kubenswrapper[5014]: E1006 21:46:46.746270 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mgsgh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr_openstack-operators(c1d9fb64-549f-4b44-bed2-b49d474beb39): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Oct 06 21:46:46 crc kubenswrapper[5014]: E1006 21:46:46.746359 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:063aae1458289d1090a77c74c2b978b9eb978b0e4062c399f0cb5434a8dd2757,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ttwjx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-7cb48dbc-5b2mb_openstack-operators(bade47f3-c50b-4f05-acfb-192e11c5b9e6): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Oct 06 21:46:46 crc kubenswrapper[5014]: E1006 21:46:46.748217 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr" podUID="c1d9fb64-549f-4b44-bed2-b49d474beb39"
Oct 06 21:46:46 crc kubenswrapper[5014]: I1006 21:46:46.795039 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd"]
Oct 06 21:46:46 crc kubenswrapper[5014]: E1006 21:46:46.920212 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr" podUID="b46c946e-8936-434c-8a13-0670857929d4"
Oct 06 21:46:46 crc kubenswrapper[5014]: E1006 21:46:46.948599 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh" podUID="b5c83451-721d-4897-b4b9-996ee0d7ae94"
Oct 06 21:46:47 crc kubenswrapper[5014]: E1006 21:46:47.073173 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs" podUID="41b602d9-b505-4656-8dfa-1443404db0c1"
Oct 06 21:46:47 crc kubenswrapper[5014]: E1006 21:46:47.080756 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh" podUID="df9f59e9-4a23-4204-be0f-5f2729f419b1"
Oct 06 21:46:47 crc kubenswrapper[5014]: E1006 21:46:47.172743 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb" podUID="6c35d36c-16ac-4ba1-9166-32c9a56ba6a0"
Oct 06 21:46:47 crc kubenswrapper[5014]: E1006 21:46:47.180100 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6" podUID="b73339a9-4a16-41e5-8739-4822f0199db4"
Oct 06 21:46:47 crc kubenswrapper[5014]: E1006 21:46:47.184200 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb" podUID="bade47f3-c50b-4f05-acfb-192e11c5b9e6"
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.388587 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-579544b768-thzrl" event={"ID":"d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc","Type":"ContainerStarted","Data":"7e576e79b94b150c373517abe28cd8d76aeee69993d325f8fabf0620edc03ac4"}
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.388640 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-579544b768-thzrl" event={"ID":"d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc","Type":"ContainerStarted","Data":"4317353a0eb7292fd6db1ee935efd163fd00d9b759837a6ef6e7a5d548dd8275"}
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.388651 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-579544b768-thzrl" event={"ID":"d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc","Type":"ContainerStarted","Data":"c93f784d4679ae4800217951a6c5a53a248e57cf92d8557b590258e052b54e55"}
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.388807 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-579544b768-thzrl"
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.389999 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-69b956fbf6-8fg8b" event={"ID":"bea4afa0-51fb-4d29-84ba-807b4adc79cd","Type":"ContainerStarted","Data":"4986b1aff0d416fde7f3bc0c563fa01def6f91f3f53cef5473736e7f510294f7"}
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.396076 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6" event={"ID":"b73339a9-4a16-41e5-8739-4822f0199db4","Type":"ContainerStarted","Data":"7ce76276b52d6deb6bbb390424f2d3820faf55c17ce1bc77450101764cd29aff"}
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.396110 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6" event={"ID":"b73339a9-4a16-41e5-8739-4822f0199db4","Type":"ContainerStarted","Data":"8cc0b12ab11c8bdac309deb6a96f8832c26c8e77ae359cabbe34325952181bbd"}
Oct 06 21:46:47 crc kubenswrapper[5014]: E1006 21:46:47.404351 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:637bb7b9ac308bc1e323391a3593b824f688090a856c83385814c17a571b1eed\\\"\"" pod="openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6" podUID="b73339a9-4a16-41e5-8739-4822f0199db4"
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.414143 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh" event={"ID":"df9f59e9-4a23-4204-be0f-5f2729f419b1","Type":"ContainerStarted","Data":"c7c6c95eee14cb2ce4fb9bfa44bfc14203c925cc77db3855c4c43488bc619f86"}
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.414185 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh" event={"ID":"df9f59e9-4a23-4204-be0f-5f2729f419b1","Type":"ContainerStarted","Data":"5615cfe112f82fde7e0c93bfc3878b9207ebad00ea412fc1690525941a3d6e5f"}
Oct 06 21:46:47 crc kubenswrapper[5014]: E1006 21:46:47.417976 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:b6cef68bfaacdf992a9fa1a6b03a848a48c18cbb6ed12d95561b4b37d858b99f\\\"\"" pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh" podUID="df9f59e9-4a23-4204-be0f-5f2729f419b1"
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.421934 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb" event={"ID":"6c35d36c-16ac-4ba1-9166-32c9a56ba6a0","Type":"ContainerStarted","Data":"4c4febd54a21a2a6e0e82ad40495c16791a138a92da28454e2ae44d26cdf6b67"}
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.422016 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb" event={"ID":"6c35d36c-16ac-4ba1-9166-32c9a56ba6a0","Type":"ContainerStarted","Data":"8c6026303b7477ad217ae586b6c06d8de12ce1b59990a480a71cbcd357e43b08"}
Oct 06 21:46:47 crc kubenswrapper[5014]: E1006 21:46:47.423685 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:64f57b2b59dea2bd9fae91490c5bec2687131884a049e6579819d9f951b877c6\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb" podUID="6c35d36c-16ac-4ba1-9166-32c9a56ba6a0"
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.424030 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb" event={"ID":"bade47f3-c50b-4f05-acfb-192e11c5b9e6","Type":"ContainerStarted","Data":"61519a011dcf2449573a45eabedf21d753fc699efc612bda6ae5f6d814b0cbb5"}
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.424070 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb" event={"ID":"bade47f3-c50b-4f05-acfb-192e11c5b9e6","Type":"ContainerStarted","Data":"df380310d25f7b9933ea166f1ead2243020d727432e42039a16ab506f244a4c9"}
Oct 06 21:46:47 crc kubenswrapper[5014]: E1006 21:46:47.424702 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:063aae1458289d1090a77c74c2b978b9eb978b0e4062c399f0cb5434a8dd2757\\\"\"" pod="openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb" podUID="bade47f3-c50b-4f05-acfb-192e11c5b9e6"
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.425038 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-579544b768-thzrl" podStartSLOduration=3.425028754 podStartE2EDuration="3.425028754s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:46:47.422294308 +0000 UTC m=+952.715331042" watchObservedRunningTime="2025-10-06 21:46:47.425028754 +0000 UTC m=+952.718065478"
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.435935 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd" event={"ID":"c4de151c-737e-4ff2-8445-571ec2a5a8cd","Type":"ContainerStarted","Data":"799fa241b3d57acfab7b2e508cc62edf3eddd520821241dde68b76da72b79c5d"}
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.444739 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-6bb6dcddc-v5l5b" event={"ID":"bd4146d3-3013-497c-85c1-37d5c7ea2e7c","Type":"ContainerStarted","Data":"1cacb4c5bdc9c693a52f5847b7644be6aab30ec962b452320ac7801e957cc4b9"}
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.456800 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh" event={"ID":"b5c83451-721d-4897-b4b9-996ee0d7ae94","Type":"ContainerStarted","Data":"7e0c57b407ee1524146739b8952afacf9ea49e40012b373ee985152a13f6d408"}
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.456847 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh" event={"ID":"b5c83451-721d-4897-b4b9-996ee0d7ae94","Type":"ContainerStarted","Data":"2f9610972767564dc28bd19affbeda2d883e3cd61b6980a0052509cd8e3d49e8"}
Oct 06 21:46:47 crc kubenswrapper[5014]: E1006 21:46:47.463967 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:e4c4ff39c54c0af231fb781759ab50ed86285c74d38bdea43fa75646b762d842\\\"\"" pod="openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh" podUID="b5c83451-721d-4897-b4b9-996ee0d7ae94"
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.468815 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr" event={"ID":"c1d9fb64-549f-4b44-bed2-b49d474beb39","Type":"ContainerStarted","Data":"b66c1b6c4b4f928a52549daf65ff33d60d0584b6d203f3c5ef182a78d78bbd88"}
Oct 06 21:46:47 crc kubenswrapper[5014]: E1006 21:46:47.475644 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr" podUID="c1d9fb64-549f-4b44-bed2-b49d474beb39"
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.483912 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs" event={"ID":"41b602d9-b505-4656-8dfa-1443404db0c1","Type":"ContainerStarted","Data":"93f7b37e83a485f4f16380a178564629b68815c30bdeb0841ed9dc1e7ea561ab"}
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.483952 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs" event={"ID":"41b602d9-b505-4656-8dfa-1443404db0c1","Type":"ContainerStarted","Data":"4fa2d61cab0c699a98c1d04f60a666e1c487cdbc66a9664ab1375db255659a1b"}
Oct 06 21:46:47 crc kubenswrapper[5014]: E1006 21:46:47.496367 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:adc23c5fd1aece2b16dc8e22ceed628f9a719455e39d3f98c77544665c6749e1\\\"\"" pod="openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs" podUID="41b602d9-b505-4656-8dfa-1443404db0c1"
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.525916 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-f589c7597-scb7g" event={"ID":"17d7e65a-c72c-4d26-81e3-8374778c5c3b","Type":"ContainerStarted","Data":"2c0514541c3cc33e61aac0328a6856d14680fbc0dd633c29b8028d8777b3f6d9"}
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.527406 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-d6c9dc5bc-jngd6" event={"ID":"031743a5-fa78-4f1e-aaba-2b53f308a1b3","Type":"ContainerStarted","Data":"f852cbebd999837e398cd1500e2040955fd686790498729ba295d3a1d393770f"}
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.563837 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr" event={"ID":"b46c946e-8936-434c-8a13-0670857929d4","Type":"ContainerStarted","Data":"8a896b7560cf3948f36b11ca708d287a26178f3c8b0c2d2d423ed482196f6a78"}
Oct 06 21:46:47 crc kubenswrapper[5014]: I1006 21:46:47.563877 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr" event={"ID":"b46c946e-8936-434c-8a13-0670857929d4","Type":"ContainerStarted","Data":"d2eb57f2ec0931d2ecd2ddc951a4be4e95731558804a002df43bf95a46dd6688"}
Oct 06 21:46:47 crc kubenswrapper[5014]: E1006 21:46:47.568717 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:da5c3078d80878d66c616e6f8a0bb909f95d971cde2c612f96fded064113e182\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr" podUID="b46c946e-8936-434c-8a13-0670857929d4"
Oct 06 21:46:48 crc kubenswrapper[5014]: E1006 21:46:48.586927 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:063aae1458289d1090a77c74c2b978b9eb978b0e4062c399f0cb5434a8dd2757\\\"\"" pod="openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb" podUID="bade47f3-c50b-4f05-acfb-192e11c5b9e6"
Oct 06 21:46:48 crc kubenswrapper[5014]: E1006 21:46:48.586961 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr" podUID="c1d9fb64-549f-4b44-bed2-b49d474beb39"
Oct 06 21:46:48 crc kubenswrapper[5014]: E1006 21:46:48.587074 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:b6cef68bfaacdf992a9fa1a6b03a848a48c18cbb6ed12d95561b4b37d858b99f\\\"\"" pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh" podUID="df9f59e9-4a23-4204-be0f-5f2729f419b1"
Oct 06 21:46:48 crc kubenswrapper[5014]: E1006 21:46:48.587341 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:64f57b2b59dea2bd9fae91490c5bec2687131884a049e6579819d9f951b877c6\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb" podUID="6c35d36c-16ac-4ba1-9166-32c9a56ba6a0"
Oct 06 21:46:48 crc kubenswrapper[5014]: E1006 21:46:48.587447 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:da5c3078d80878d66c616e6f8a0bb909f95d971cde2c612f96fded064113e182\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr" podUID="b46c946e-8936-434c-8a13-0670857929d4"
Oct 06 21:46:48 crc kubenswrapper[5014]: E1006 21:46:48.587490 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:e4c4ff39c54c0af231fb781759ab50ed86285c74d38bdea43fa75646b762d842\\\"\"" pod="openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh" podUID="b5c83451-721d-4897-b4b9-996ee0d7ae94"
Oct 06 21:46:48 crc kubenswrapper[5014]: E1006 21:46:48.588039 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:adc23c5fd1aece2b16dc8e22ceed628f9a719455e39d3f98c77544665c6749e1\\\"\"" pod="openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs" podUID="41b602d9-b505-4656-8dfa-1443404db0c1"
Oct 06 21:46:48 crc kubenswrapper[5014]: E1006 21:46:48.598248 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:637bb7b9ac308bc1e323391a3593b824f688090a856c83385814c17a571b1eed\\\"\"" pod="openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6" podUID="b73339a9-4a16-41e5-8739-4822f0199db4"
Oct 06 21:46:51 crc kubenswrapper[5014]: I1006 21:46:51.736265 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 21:46:51 crc kubenswrapper[5014]: I1006 21:46:51.737320 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 21:46:55 crc kubenswrapper[5014]: I1006 21:46:55.463241 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-579544b768-thzrl"
Oct 06 21:46:57 crc kubenswrapper[5014]: I1006 21:46:57.648200 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-64f56ff694-fbnjx" event={"ID":"e03f73aa-fc56-435f-8f50-e55a813a4b0c","Type":"ContainerStarted","Data":"d155fd5236f1fc27742e46ef3b4e91aafc4448cc06663657e3aeeeb6d7ec28f3"}
Oct 06 21:46:57 crc kubenswrapper[5014]: I1006 21:46:57.655795 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-84bd8f6848-wpcz6" event={"ID":"89c6e73c-a5c8-4909-85e5-48118abefc95","Type":"ContainerStarted","Data":"b73f73b8c3656e17ffffd0428d5170a897099ea2bdccdcdc93bb9e84c79bf11b"}
Oct 06 21:46:57 crc kubenswrapper[5014]: I1006 21:46:57.662716 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5b84cc7657-ml985" event={"ID":"1afbb4e8-7c9b-4521-9496-d0df6b2003bb","Type":"ContainerStarted","Data":"e8b90a7a63c26c497918e34f969d10c91aba878816ead8b020fad982d4011c95"}
Oct 06 21:46:57 crc kubenswrapper[5014]: I1006 21:46:57.673347 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54d485fd9-mq79t" event={"ID":"840c119a-0407-45bd-9588-795a38db80a8","Type":"ContainerStarted","Data":"85bf0be3a630d1abbf136f6ccf91852b8086a30e5a1fd23a921fec902cac372d"}
Oct 06 21:46:57 crc kubenswrapper[5014]: I1006 21:46:57.677821 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-58d86cd59d-8w8ws" event={"ID":"6aea271b-eb7b-4e06-8bbb-65807a8027b6","Type":"ContainerStarted","Data":"dc673fb13669c9c4c240b1d76ed9265de5e81e6b11fc5e14401309ad943387e0"}
Oct 06 21:46:57 crc kubenswrapper[5014]: I1006 21:46:57.682541 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-69b956fbf6-8fg8b" event={"ID":"bea4afa0-51fb-4d29-84ba-807b4adc79cd","Type":"ContainerStarted","Data":"602ad3f338b5cc61396bbca3b89d8fa12092fad1cc60174258b4e50fd0d76c5b"}
Oct 06 21:46:57 crc kubenswrapper[5014]: I1006 21:46:57.684383 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd"
event={"ID":"c4de151c-737e-4ff2-8445-571ec2a5a8cd","Type":"ContainerStarted","Data":"6644dadea15a29f9fbe7caddd6602bfa2763fdc4f34021064b13843a3bc1b063"} Oct 06 21:46:57 crc kubenswrapper[5014]: I1006 21:46:57.687229 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-6bb6dcddc-v5l5b" event={"ID":"bd4146d3-3013-497c-85c1-37d5c7ea2e7c","Type":"ContainerStarted","Data":"c95a006f0472a387a9bd75d4451005aa50c55c142a4884e99df54b9a682d9e1d"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.694549 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-7ccfc8cf49-9k728" event={"ID":"89fc8a32-1a12-4d35-b7fe-2e1fa829c4b7","Type":"ContainerStarted","Data":"a86a83c6e386a48df25f19a6c4aa901ce9a95065c2be5ea52062270d62bb8999"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.695876 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-7ccfc8cf49-9k728" event={"ID":"89fc8a32-1a12-4d35-b7fe-2e1fa829c4b7","Type":"ContainerStarted","Data":"ed4d0c4db715a890606d95c04052b71d9d2d4563f0c02df100b631b60a849f51"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.695904 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-7ccfc8cf49-9k728" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.696905 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5467f8988c-qzk8x" event={"ID":"df7496c8-460c-4f74-83a1-bb1eefeac13c","Type":"ContainerStarted","Data":"456d4d83f04199fdb7a55a9653cc70a0c2ce6bf49d9318994222e7cf3711e95b"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.696954 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5467f8988c-qzk8x" event={"ID":"df7496c8-460c-4f74-83a1-bb1eefeac13c","Type":"ContainerStarted","Data":"8a37b6dbddd0a5dbc75d0f2618fb9851ea496c0003b718f8f791052935812332"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.697098 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5467f8988c-qzk8x" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.698224 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-64f56ff694-fbnjx" event={"ID":"e03f73aa-fc56-435f-8f50-e55a813a4b0c","Type":"ContainerStarted","Data":"57c714e673ab5cf1344688e82e69610fb02473af457d89bb36e4520dd22d4cbf"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.698340 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-64f56ff694-fbnjx" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.699461 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54d485fd9-mq79t" event={"ID":"840c119a-0407-45bd-9588-795a38db80a8","Type":"ContainerStarted","Data":"1bc6ab69fd23662780b55b5180ece0095a3f0ae138e9083bf97fa134487024cc"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.699635 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-54d485fd9-mq79t" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.700818 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/telemetry-operator-controller-manager-f589c7597-scb7g" event={"ID":"17d7e65a-c72c-4d26-81e3-8374778c5c3b","Type":"ContainerStarted","Data":"5b7a5df3b91edfc4da7a2017f539609569d81da1c2124aaa5a67c346cb61979b"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.700865 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-f589c7597-scb7g" event={"ID":"17d7e65a-c72c-4d26-81e3-8374778c5c3b","Type":"ContainerStarted","Data":"815cd360a54d0dd78cd0bf7e8db41ced49a0414b5bf9dbf2e45fbe42f7841ba1"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.701062 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-f589c7597-scb7g" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.702143 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-d6c9dc5bc-jngd6" event={"ID":"031743a5-fa78-4f1e-aaba-2b53f308a1b3","Type":"ContainerStarted","Data":"b74999b873ff2072f2312322687e8f2bfb0c4f1edc9f16851bea6147841c4ce9"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.702173 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-d6c9dc5bc-jngd6" event={"ID":"031743a5-fa78-4f1e-aaba-2b53f308a1b3","Type":"ContainerStarted","Data":"09300995746cc02b4d68f5724b9b5405b62211a33377e669b60b81f37c27eb9f"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.702299 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-d6c9dc5bc-jngd6" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.705265 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-84bd8f6848-wpcz6" event={"ID":"89c6e73c-a5c8-4909-85e5-48118abefc95","Type":"ContainerStarted","Data":"4c61d597c933866575835ee0bd3c81072e212e32f616a19583119dc14b91ab6a"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.705384 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-84bd8f6848-wpcz6" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.706642 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd" event={"ID":"c4de151c-737e-4ff2-8445-571ec2a5a8cd","Type":"ContainerStarted","Data":"2d08234fc109ac566367b09da64df32407d09d9f67bb801f7c351da11d92848c"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.706882 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.707897 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-fd648f65-7qqn8" event={"ID":"3e1db7a5-54f5-4e2c-9edc-3a5f7ce19d9e","Type":"ContainerStarted","Data":"231a80b7a6abdd7de12b08249ae1dea265a04fe9036000bd4c64cf98ddb3d6dd"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.707929 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-fd648f65-7qqn8" event={"ID":"3e1db7a5-54f5-4e2c-9edc-3a5f7ce19d9e","Type":"ContainerStarted","Data":"828c57ea6f8123f01a59d40c0aac6623ac571fc5fc11a255857418d5847c3ebe"} Oct 06 21:46:58 crc 
kubenswrapper[5014]: I1006 21:46:58.707997 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-fd648f65-7qqn8" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.709219 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-6bb6dcddc-v5l5b" event={"ID":"bd4146d3-3013-497c-85c1-37d5c7ea2e7c","Type":"ContainerStarted","Data":"4a421029150773c5213eebe3df8b050c940d51bc7e1b132e8777e79614caeaa9"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.709351 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-6bb6dcddc-v5l5b" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.710946 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5b477879bc-t5mpw" event={"ID":"e134fc2c-d3a1-41d2-bf11-257d15bc68c8","Type":"ContainerStarted","Data":"1ace795ca65c764d90dfa88888e88a53a122ca788f20a59f45e2794236d2d1b4"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.710979 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5b477879bc-t5mpw" event={"ID":"e134fc2c-d3a1-41d2-bf11-257d15bc68c8","Type":"ContainerStarted","Data":"677df4198449a854c189ad3df40d8914bb173f5ab5b1aaa57ad25bf9b8b88925"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.711079 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5b477879bc-t5mpw" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.713085 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-69b956fbf6-8fg8b" event={"ID":"bea4afa0-51fb-4d29-84ba-807b4adc79cd","Type":"ContainerStarted","Data":"4e83adccbb8682ad0514cb9788af8f1c3cd7a97649f942d56b4d0e5901851b6d"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.713209 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-69b956fbf6-8fg8b" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.715530 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5b84cc7657-ml985" event={"ID":"1afbb4e8-7c9b-4521-9496-d0df6b2003bb","Type":"ContainerStarted","Data":"82d8ef7581aefb6cf90721133300ff985d00849b6ef3972cd3ec43ec57768574"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.715748 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-5b84cc7657-ml985" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.717171 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-58d86cd59d-8w8ws" event={"ID":"6aea271b-eb7b-4e06-8bbb-65807a8027b6","Type":"ContainerStarted","Data":"fa4f76dbbb187d524d1b05eb3d740ba9de4fd3ef3c0ea01967252ec11039168d"} Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.717345 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-58d86cd59d-8w8ws" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.722282 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-7ccfc8cf49-9k728" 
podStartSLOduration=4.662763471 podStartE2EDuration="15.722272505s" podCreationTimestamp="2025-10-06 21:46:43 +0000 UTC" firstStartedPulling="2025-10-06 21:46:45.954136215 +0000 UTC m=+951.247172949" lastFinishedPulling="2025-10-06 21:46:57.013645249 +0000 UTC m=+962.306681983" observedRunningTime="2025-10-06 21:46:58.711324908 +0000 UTC m=+964.004361642" watchObservedRunningTime="2025-10-06 21:46:58.722272505 +0000 UTC m=+964.015309239" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.750264 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-84bd8f6848-wpcz6" podStartSLOduration=4.940222758 podStartE2EDuration="15.750244983s" podCreationTimestamp="2025-10-06 21:46:43 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.199410331 +0000 UTC m=+951.492447055" lastFinishedPulling="2025-10-06 21:46:57.009432536 +0000 UTC m=+962.302469280" observedRunningTime="2025-10-06 21:46:58.750003796 +0000 UTC m=+964.043040530" watchObservedRunningTime="2025-10-06 21:46:58.750244983 +0000 UTC m=+964.043281717" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.801549 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-64f56ff694-fbnjx" podStartSLOduration=4.970991585 podStartE2EDuration="15.801533121s" podCreationTimestamp="2025-10-06 21:46:43 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.178965972 +0000 UTC m=+951.472002706" lastFinishedPulling="2025-10-06 21:46:57.009507508 +0000 UTC m=+962.302544242" observedRunningTime="2025-10-06 21:46:58.779017547 +0000 UTC m=+964.072054271" watchObservedRunningTime="2025-10-06 21:46:58.801533121 +0000 UTC m=+964.094569855" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.803513 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-f589c7597-scb7g" podStartSLOduration=4.347440254 podStartE2EDuration="14.803507604s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.559241002 +0000 UTC m=+951.852277736" lastFinishedPulling="2025-10-06 21:46:57.015308342 +0000 UTC m=+962.308345086" observedRunningTime="2025-10-06 21:46:58.800749367 +0000 UTC m=+964.093786101" watchObservedRunningTime="2025-10-06 21:46:58.803507604 +0000 UTC m=+964.096544338" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.826957 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5b477879bc-t5mpw" podStartSLOduration=3.779870768 podStartE2EDuration="14.826939558s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:45.966174887 +0000 UTC m=+951.259211621" lastFinishedPulling="2025-10-06 21:46:57.013243637 +0000 UTC m=+962.306280411" observedRunningTime="2025-10-06 21:46:58.824384756 +0000 UTC m=+964.117421490" watchObservedRunningTime="2025-10-06 21:46:58.826939558 +0000 UTC m=+964.119976292" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.850478 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5467f8988c-qzk8x" podStartSLOduration=4.036456953 podStartE2EDuration="14.850461075s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.199329247 +0000 UTC m=+951.492365981" lastFinishedPulling="2025-10-06 21:46:57.013333369 +0000 UTC 
m=+962.306370103" observedRunningTime="2025-10-06 21:46:58.844582058 +0000 UTC m=+964.137618792" watchObservedRunningTime="2025-10-06 21:46:58.850461075 +0000 UTC m=+964.143497809" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.870843 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-6bb6dcddc-v5l5b" podStartSLOduration=4.328842283 podStartE2EDuration="14.870830321s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.46592025 +0000 UTC m=+951.758956984" lastFinishedPulling="2025-10-06 21:46:57.007908278 +0000 UTC m=+962.300945022" observedRunningTime="2025-10-06 21:46:58.869689095 +0000 UTC m=+964.162725829" watchObservedRunningTime="2025-10-06 21:46:58.870830321 +0000 UTC m=+964.163867045" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.886686 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-d6c9dc5bc-jngd6" podStartSLOduration=4.337769147 podStartE2EDuration="14.886673574s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.470554957 +0000 UTC m=+951.763591691" lastFinishedPulling="2025-10-06 21:46:57.019459384 +0000 UTC m=+962.312496118" observedRunningTime="2025-10-06 21:46:58.884425753 +0000 UTC m=+964.177462487" watchObservedRunningTime="2025-10-06 21:46:58.886673574 +0000 UTC m=+964.179710308" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.915208 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd" podStartSLOduration=4.730236674 podStartE2EDuration="14.915192169s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.824344177 +0000 UTC m=+952.117380911" lastFinishedPulling="2025-10-06 21:46:57.009299672 +0000 UTC m=+962.302336406" observedRunningTime="2025-10-06 21:46:58.913020451 +0000 UTC m=+964.206057185" watchObservedRunningTime="2025-10-06 21:46:58.915192169 +0000 UTC m=+964.208228893" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.942727 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-fd648f65-7qqn8" podStartSLOduration=5.04105401 podStartE2EDuration="15.942710863s" podCreationTimestamp="2025-10-06 21:46:43 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.107732251 +0000 UTC m=+951.400768985" lastFinishedPulling="2025-10-06 21:46:57.009389094 +0000 UTC m=+962.302425838" observedRunningTime="2025-10-06 21:46:58.939050346 +0000 UTC m=+964.232087080" watchObservedRunningTime="2025-10-06 21:46:58.942710863 +0000 UTC m=+964.235747597" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.965971 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-54d485fd9-mq79t" podStartSLOduration=4.120109369 podStartE2EDuration="14.965949891s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.162707575 +0000 UTC m=+951.455744309" lastFinishedPulling="2025-10-06 21:46:57.008548077 +0000 UTC m=+962.301584831" observedRunningTime="2025-10-06 21:46:58.963820663 +0000 UTC m=+964.256857397" watchObservedRunningTime="2025-10-06 21:46:58.965949891 +0000 UTC m=+964.258986635" Oct 06 21:46:58 crc kubenswrapper[5014]: I1006 21:46:58.998171 5014 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-58d86cd59d-8w8ws" podStartSLOduration=5.147954382 podStartE2EDuration="15.998156112s" podCreationTimestamp="2025-10-06 21:46:43 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.158399229 +0000 UTC m=+951.451435963" lastFinishedPulling="2025-10-06 21:46:57.008600959 +0000 UTC m=+962.301637693" observedRunningTime="2025-10-06 21:46:58.98294583 +0000 UTC m=+964.275982564" watchObservedRunningTime="2025-10-06 21:46:58.998156112 +0000 UTC m=+964.291192846" Oct 06 21:46:59 crc kubenswrapper[5014]: I1006 21:46:59.000925 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-69b956fbf6-8fg8b" podStartSLOduration=4.405343971 podStartE2EDuration="15.000907509s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.42464789 +0000 UTC m=+951.717684624" lastFinishedPulling="2025-10-06 21:46:57.020211418 +0000 UTC m=+962.313248162" observedRunningTime="2025-10-06 21:46:58.996127859 +0000 UTC m=+964.289164583" watchObservedRunningTime="2025-10-06 21:46:59.000907509 +0000 UTC m=+964.293944243" Oct 06 21:46:59 crc kubenswrapper[5014]: I1006 21:46:59.015442 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-5b84cc7657-ml985" podStartSLOduration=4.098612374 podStartE2EDuration="15.01542195s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.091956479 +0000 UTC m=+951.384993213" lastFinishedPulling="2025-10-06 21:46:57.008766015 +0000 UTC m=+962.301802789" observedRunningTime="2025-10-06 21:46:59.012409145 +0000 UTC m=+964.305445879" watchObservedRunningTime="2025-10-06 21:46:59.01542195 +0000 UTC m=+964.308458694" Oct 06 21:47:03 crc kubenswrapper[5014]: I1006 21:47:03.763248 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr" event={"ID":"b46c946e-8936-434c-8a13-0670857929d4","Type":"ContainerStarted","Data":"88d8a6b575557758220d960d4241cbba1ffde1c053ab4fbcd6665a6f48c41510"} Oct 06 21:47:03 crc kubenswrapper[5014]: I1006 21:47:03.764177 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr" Oct 06 21:47:03 crc kubenswrapper[5014]: I1006 21:47:03.769062 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6" event={"ID":"b73339a9-4a16-41e5-8739-4822f0199db4","Type":"ContainerStarted","Data":"b48d0b603a59658f6cbff420f44348d68fcad0ee28dad7e270b3ffc5d99cc370"} Oct 06 21:47:03 crc kubenswrapper[5014]: I1006 21:47:03.769329 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6" Oct 06 21:47:03 crc kubenswrapper[5014]: I1006 21:47:03.772878 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh" event={"ID":"df9f59e9-4a23-4204-be0f-5f2729f419b1","Type":"ContainerStarted","Data":"39708718ce4da4c2f0ab3880797a5949344ad941a862bb2f60849b813d2971e2"} Oct 06 21:47:03 crc kubenswrapper[5014]: I1006 21:47:03.773430 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh" Oct 06 21:47:03 crc kubenswrapper[5014]: I1006 21:47:03.775097 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb" event={"ID":"bade47f3-c50b-4f05-acfb-192e11c5b9e6","Type":"ContainerStarted","Data":"f0bda8ad95b37b898b28bcd91a4c6d2042c3805c8ecc6cc4a5f8ac8c8801ca10"} Oct 06 21:47:03 crc kubenswrapper[5014]: I1006 21:47:03.775548 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb" Oct 06 21:47:03 crc kubenswrapper[5014]: I1006 21:47:03.783925 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr" podStartSLOduration=3.156850561 podStartE2EDuration="19.783904993s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.545186966 +0000 UTC m=+951.838223700" lastFinishedPulling="2025-10-06 21:47:03.172241398 +0000 UTC m=+968.465278132" observedRunningTime="2025-10-06 21:47:03.777612753 +0000 UTC m=+969.070649497" watchObservedRunningTime="2025-10-06 21:47:03.783904993 +0000 UTC m=+969.076941727" Oct 06 21:47:03 crc kubenswrapper[5014]: I1006 21:47:03.797996 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6" podStartSLOduration=3.411038561 podStartE2EDuration="19.79797483s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.746007481 +0000 UTC m=+952.039044215" lastFinishedPulling="2025-10-06 21:47:03.13294374 +0000 UTC m=+968.425980484" observedRunningTime="2025-10-06 21:47:03.792879518 +0000 UTC m=+969.085916252" watchObservedRunningTime="2025-10-06 21:47:03.79797483 +0000 UTC m=+969.091011564" Oct 06 21:47:03 crc kubenswrapper[5014]: I1006 21:47:03.818365 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh" podStartSLOduration=3.237358137 podStartE2EDuration="19.818342957s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.551281539 +0000 UTC m=+951.844318283" lastFinishedPulling="2025-10-06 21:47:03.132266369 +0000 UTC m=+968.425303103" observedRunningTime="2025-10-06 21:47:03.815568238 +0000 UTC m=+969.108604992" watchObservedRunningTime="2025-10-06 21:47:03.818342957 +0000 UTC m=+969.111379691" Oct 06 21:47:03 crc kubenswrapper[5014]: I1006 21:47:03.836181 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb" podStartSLOduration=3.428260257 podStartE2EDuration="19.836155432s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.74630997 +0000 UTC m=+952.039346704" lastFinishedPulling="2025-10-06 21:47:03.154205145 +0000 UTC m=+968.447241879" observedRunningTime="2025-10-06 21:47:03.836156802 +0000 UTC m=+969.129193536" watchObservedRunningTime="2025-10-06 21:47:03.836155432 +0000 UTC m=+969.129192166" Oct 06 21:47:04 crc kubenswrapper[5014]: I1006 21:47:04.276824 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-64f56ff694-fbnjx" Oct 06 21:47:04 crc kubenswrapper[5014]: I1006 21:47:04.282377 5014 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-84bd8f6848-wpcz6" Oct 06 21:47:04 crc kubenswrapper[5014]: I1006 21:47:04.297863 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-58d86cd59d-8w8ws" Oct 06 21:47:04 crc kubenswrapper[5014]: I1006 21:47:04.356441 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-7ccfc8cf49-9k728" Oct 06 21:47:04 crc kubenswrapper[5014]: I1006 21:47:04.373094 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-fd648f65-7qqn8" Oct 06 21:47:04 crc kubenswrapper[5014]: I1006 21:47:04.424510 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5b477879bc-t5mpw" Oct 06 21:47:04 crc kubenswrapper[5014]: I1006 21:47:04.454439 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5467f8988c-qzk8x" Oct 06 21:47:04 crc kubenswrapper[5014]: I1006 21:47:04.485094 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-5b84cc7657-ml985" Oct 06 21:47:04 crc kubenswrapper[5014]: I1006 21:47:04.572821 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-69b956fbf6-8fg8b" Oct 06 21:47:04 crc kubenswrapper[5014]: I1006 21:47:04.598382 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-d6c9dc5bc-jngd6" Oct 06 21:47:04 crc kubenswrapper[5014]: I1006 21:47:04.692960 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-54d485fd9-mq79t" Oct 06 21:47:04 crc kubenswrapper[5014]: I1006 21:47:04.851975 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-f589c7597-scb7g" Oct 06 21:47:04 crc kubenswrapper[5014]: I1006 21:47:04.881953 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-6bb6dcddc-v5l5b" Oct 06 21:47:05 crc kubenswrapper[5014]: I1006 21:47:05.797012 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb" event={"ID":"6c35d36c-16ac-4ba1-9166-32c9a56ba6a0","Type":"ContainerStarted","Data":"4de7942e0cfe0a7220b442976476ffc87d809fc8c5a2658a3094bc06a0e11f13"} Oct 06 21:47:05 crc kubenswrapper[5014]: I1006 21:47:05.797230 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb" Oct 06 21:47:05 crc kubenswrapper[5014]: I1006 21:47:05.800481 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs" event={"ID":"41b602d9-b505-4656-8dfa-1443404db0c1","Type":"ContainerStarted","Data":"10087030e5b1287a99d2cb7cd2b47cd3db46dafcdea5422473da6644cccad922"} Oct 06 21:47:05 crc kubenswrapper[5014]: I1006 21:47:05.800689 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs" Oct 06 21:47:05 crc kubenswrapper[5014]: I1006 21:47:05.816916 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb" podStartSLOduration=3.119275339 podStartE2EDuration="21.816900045s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.612444471 +0000 UTC m=+951.905481205" lastFinishedPulling="2025-10-06 21:47:05.310069157 +0000 UTC m=+970.603105911" observedRunningTime="2025-10-06 21:47:05.815142189 +0000 UTC m=+971.108178923" watchObservedRunningTime="2025-10-06 21:47:05.816900045 +0000 UTC m=+971.109936779" Oct 06 21:47:05 crc kubenswrapper[5014]: I1006 21:47:05.834905 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs" podStartSLOduration=3.103796959 podStartE2EDuration="21.834888356s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.563431116 +0000 UTC m=+951.856467850" lastFinishedPulling="2025-10-06 21:47:05.294522503 +0000 UTC m=+970.587559247" observedRunningTime="2025-10-06 21:47:05.833169851 +0000 UTC m=+971.126206595" watchObservedRunningTime="2025-10-06 21:47:05.834888356 +0000 UTC m=+971.127925090" Oct 06 21:47:06 crc kubenswrapper[5014]: I1006 21:47:06.205212 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd" Oct 06 21:47:07 crc kubenswrapper[5014]: I1006 21:47:07.820256 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh" event={"ID":"b5c83451-721d-4897-b4b9-996ee0d7ae94","Type":"ContainerStarted","Data":"98d9014eec7939bda9cc10c2e35b61b5279b44b27d0e9d91e26ebafb540fd1fa"} Oct 06 21:47:07 crc kubenswrapper[5014]: I1006 21:47:07.820944 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh" Oct 06 21:47:07 crc kubenswrapper[5014]: I1006 21:47:07.824513 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr" event={"ID":"c1d9fb64-549f-4b44-bed2-b49d474beb39","Type":"ContainerStarted","Data":"04d4aeca918ffd14bbf392c55379964cfcd0fd067d214de1e94c05cfcc077b60"} Oct 06 21:47:07 crc kubenswrapper[5014]: I1006 21:47:07.851237 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh" podStartSLOduration=3.4181702769999998 podStartE2EDuration="23.851206899s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 21:46:46.527162924 +0000 UTC m=+951.820199658" lastFinishedPulling="2025-10-06 21:47:06.960199536 +0000 UTC m=+972.253236280" observedRunningTime="2025-10-06 21:47:07.84277 +0000 UTC m=+973.135806764" watchObservedRunningTime="2025-10-06 21:47:07.851206899 +0000 UTC m=+973.144243663" Oct 06 21:47:07 crc kubenswrapper[5014]: I1006 21:47:07.875242 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr" podStartSLOduration=3.667878013 podStartE2EDuration="23.875210761s" podCreationTimestamp="2025-10-06 21:46:44 +0000 UTC" firstStartedPulling="2025-10-06 
21:46:46.746214737 +0000 UTC m=+952.039251471" lastFinishedPulling="2025-10-06 21:47:06.953547475 +0000 UTC m=+972.246584219" observedRunningTime="2025-10-06 21:47:07.865384029 +0000 UTC m=+973.158420803" watchObservedRunningTime="2025-10-06 21:47:07.875210761 +0000 UTC m=+973.168247535" Oct 06 21:47:14 crc kubenswrapper[5014]: I1006 21:47:14.561137 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-7cb48dbc-5b2mb" Oct 06 21:47:14 crc kubenswrapper[5014]: I1006 21:47:14.669276 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-6c9b57c67-hqrxh" Oct 06 21:47:14 crc kubenswrapper[5014]: I1006 21:47:14.682236 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-69f59f9d8-s6hxr" Oct 06 21:47:14 crc kubenswrapper[5014]: I1006 21:47:14.747306 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-66f6d6849b-kwbrs" Oct 06 21:47:14 crc kubenswrapper[5014]: I1006 21:47:14.807612 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-5d98cc5575-md7kb" Oct 06 21:47:14 crc kubenswrapper[5014]: I1006 21:47:14.839777 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-76d5577b-w8ds6" Oct 06 21:47:15 crc kubenswrapper[5014]: I1006 21:47:15.031440 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-84788b6bc5-zbcqh" Oct 06 21:47:21 crc kubenswrapper[5014]: I1006 21:47:21.735209 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 21:47:21 crc kubenswrapper[5014]: I1006 21:47:21.736343 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.370075 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bfcb9d745-wzd79"] Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.372682 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bfcb9d745-wzd79" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.374806 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.375471 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-g8lvq" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.375692 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.377099 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.378116 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bfcb9d745-wzd79"] Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.398547 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccc4931c-f91c-460e-961d-074196743645-config\") pod \"dnsmasq-dns-7bfcb9d745-wzd79\" (UID: \"ccc4931c-f91c-460e-961d-074196743645\") " pod="openstack/dnsmasq-dns-7bfcb9d745-wzd79" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.398660 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrb9m\" (UniqueName: \"kubernetes.io/projected/ccc4931c-f91c-460e-961d-074196743645-kube-api-access-rrb9m\") pod \"dnsmasq-dns-7bfcb9d745-wzd79\" (UID: \"ccc4931c-f91c-460e-961d-074196743645\") " pod="openstack/dnsmasq-dns-7bfcb9d745-wzd79" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.442653 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-758b79db4c-f8s4q"] Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.443678 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.445995 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.465213 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-758b79db4c-f8s4q"] Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.499747 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-config\") pod \"dnsmasq-dns-758b79db4c-f8s4q\" (UID: \"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240\") " pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.499799 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrb9m\" (UniqueName: \"kubernetes.io/projected/ccc4931c-f91c-460e-961d-074196743645-kube-api-access-rrb9m\") pod \"dnsmasq-dns-7bfcb9d745-wzd79\" (UID: \"ccc4931c-f91c-460e-961d-074196743645\") " pod="openstack/dnsmasq-dns-7bfcb9d745-wzd79" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.499846 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-dns-svc\") pod \"dnsmasq-dns-758b79db4c-f8s4q\" (UID: \"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240\") " pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.500048 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccc4931c-f91c-460e-961d-074196743645-config\") pod \"dnsmasq-dns-7bfcb9d745-wzd79\" (UID: \"ccc4931c-f91c-460e-961d-074196743645\") " pod="openstack/dnsmasq-dns-7bfcb9d745-wzd79" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.500139 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gprw2\" (UniqueName: \"kubernetes.io/projected/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-kube-api-access-gprw2\") pod \"dnsmasq-dns-758b79db4c-f8s4q\" (UID: \"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240\") " pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.501056 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccc4931c-f91c-460e-961d-074196743645-config\") pod \"dnsmasq-dns-7bfcb9d745-wzd79\" (UID: \"ccc4931c-f91c-460e-961d-074196743645\") " pod="openstack/dnsmasq-dns-7bfcb9d745-wzd79" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.518573 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrb9m\" (UniqueName: \"kubernetes.io/projected/ccc4931c-f91c-460e-961d-074196743645-kube-api-access-rrb9m\") pod \"dnsmasq-dns-7bfcb9d745-wzd79\" (UID: \"ccc4931c-f91c-460e-961d-074196743645\") " pod="openstack/dnsmasq-dns-7bfcb9d745-wzd79" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.601193 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-config\") pod \"dnsmasq-dns-758b79db4c-f8s4q\" (UID: \"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240\") " pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 
21:47:32.601260 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-dns-svc\") pod \"dnsmasq-dns-758b79db4c-f8s4q\" (UID: \"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240\") " pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.601330 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gprw2\" (UniqueName: \"kubernetes.io/projected/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-kube-api-access-gprw2\") pod \"dnsmasq-dns-758b79db4c-f8s4q\" (UID: \"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240\") " pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.602453 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-config\") pod \"dnsmasq-dns-758b79db4c-f8s4q\" (UID: \"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240\") " pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.603360 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-dns-svc\") pod \"dnsmasq-dns-758b79db4c-f8s4q\" (UID: \"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240\") " pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.625765 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gprw2\" (UniqueName: \"kubernetes.io/projected/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-kube-api-access-gprw2\") pod \"dnsmasq-dns-758b79db4c-f8s4q\" (UID: \"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240\") " pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.699055 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bfcb9d745-wzd79" Oct 06 21:47:32 crc kubenswrapper[5014]: I1006 21:47:32.761360 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" Oct 06 21:47:33 crc kubenswrapper[5014]: I1006 21:47:33.227884 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bfcb9d745-wzd79"] Oct 06 21:47:33 crc kubenswrapper[5014]: I1006 21:47:33.280048 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-758b79db4c-f8s4q"] Oct 06 21:47:34 crc kubenswrapper[5014]: I1006 21:47:34.058853 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bfcb9d745-wzd79" event={"ID":"ccc4931c-f91c-460e-961d-074196743645","Type":"ContainerStarted","Data":"065246d3b68b542c9286a41572249397d16be2f56b52599e45569840924bc07b"} Oct 06 21:47:34 crc kubenswrapper[5014]: I1006 21:47:34.059911 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" event={"ID":"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240","Type":"ContainerStarted","Data":"3f1db10e0463d91345cb8499a39cc60bf86ad7a71a3206848a777c8aa552197d"} Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.195047 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-758b79db4c-f8s4q"] Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.213886 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-644597f84c-gc96s"] Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.215440 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-644597f84c-gc96s" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.260484 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-644597f84c-gc96s"] Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.263301 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5581f1cf-4f5f-4042-a04b-22a966f20b23-config\") pod \"dnsmasq-dns-644597f84c-gc96s\" (UID: \"5581f1cf-4f5f-4042-a04b-22a966f20b23\") " pod="openstack/dnsmasq-dns-644597f84c-gc96s" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.263364 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5581f1cf-4f5f-4042-a04b-22a966f20b23-dns-svc\") pod \"dnsmasq-dns-644597f84c-gc96s\" (UID: \"5581f1cf-4f5f-4042-a04b-22a966f20b23\") " pod="openstack/dnsmasq-dns-644597f84c-gc96s" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.263383 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kvln\" (UniqueName: \"kubernetes.io/projected/5581f1cf-4f5f-4042-a04b-22a966f20b23-kube-api-access-6kvln\") pod \"dnsmasq-dns-644597f84c-gc96s\" (UID: \"5581f1cf-4f5f-4042-a04b-22a966f20b23\") " pod="openstack/dnsmasq-dns-644597f84c-gc96s" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.367427 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5581f1cf-4f5f-4042-a04b-22a966f20b23-config\") pod \"dnsmasq-dns-644597f84c-gc96s\" (UID: \"5581f1cf-4f5f-4042-a04b-22a966f20b23\") " pod="openstack/dnsmasq-dns-644597f84c-gc96s" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.367511 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5581f1cf-4f5f-4042-a04b-22a966f20b23-dns-svc\") pod 
\"dnsmasq-dns-644597f84c-gc96s\" (UID: \"5581f1cf-4f5f-4042-a04b-22a966f20b23\") " pod="openstack/dnsmasq-dns-644597f84c-gc96s" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.367535 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kvln\" (UniqueName: \"kubernetes.io/projected/5581f1cf-4f5f-4042-a04b-22a966f20b23-kube-api-access-6kvln\") pod \"dnsmasq-dns-644597f84c-gc96s\" (UID: \"5581f1cf-4f5f-4042-a04b-22a966f20b23\") " pod="openstack/dnsmasq-dns-644597f84c-gc96s" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.369068 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5581f1cf-4f5f-4042-a04b-22a966f20b23-config\") pod \"dnsmasq-dns-644597f84c-gc96s\" (UID: \"5581f1cf-4f5f-4042-a04b-22a966f20b23\") " pod="openstack/dnsmasq-dns-644597f84c-gc96s" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.369412 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5581f1cf-4f5f-4042-a04b-22a966f20b23-dns-svc\") pod \"dnsmasq-dns-644597f84c-gc96s\" (UID: \"5581f1cf-4f5f-4042-a04b-22a966f20b23\") " pod="openstack/dnsmasq-dns-644597f84c-gc96s" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.403694 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kvln\" (UniqueName: \"kubernetes.io/projected/5581f1cf-4f5f-4042-a04b-22a966f20b23-kube-api-access-6kvln\") pod \"dnsmasq-dns-644597f84c-gc96s\" (UID: \"5581f1cf-4f5f-4042-a04b-22a966f20b23\") " pod="openstack/dnsmasq-dns-644597f84c-gc96s" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.537141 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-644597f84c-gc96s" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.607401 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bfcb9d745-wzd79"] Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.643989 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77597f887-ctt8h"] Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.646210 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77597f887-ctt8h" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.652555 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77597f887-ctt8h"] Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.780577 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-config\") pod \"dnsmasq-dns-77597f887-ctt8h\" (UID: \"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c\") " pod="openstack/dnsmasq-dns-77597f887-ctt8h" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.780655 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-dns-svc\") pod \"dnsmasq-dns-77597f887-ctt8h\" (UID: \"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c\") " pod="openstack/dnsmasq-dns-77597f887-ctt8h" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.780726 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4t8h8\" (UniqueName: \"kubernetes.io/projected/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-kube-api-access-4t8h8\") pod \"dnsmasq-dns-77597f887-ctt8h\" (UID: \"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c\") " pod="openstack/dnsmasq-dns-77597f887-ctt8h" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.882573 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4t8h8\" (UniqueName: \"kubernetes.io/projected/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-kube-api-access-4t8h8\") pod \"dnsmasq-dns-77597f887-ctt8h\" (UID: \"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c\") " pod="openstack/dnsmasq-dns-77597f887-ctt8h" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.882690 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-config\") pod \"dnsmasq-dns-77597f887-ctt8h\" (UID: \"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c\") " pod="openstack/dnsmasq-dns-77597f887-ctt8h" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.882774 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-dns-svc\") pod \"dnsmasq-dns-77597f887-ctt8h\" (UID: \"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c\") " pod="openstack/dnsmasq-dns-77597f887-ctt8h" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.883890 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-config\") pod \"dnsmasq-dns-77597f887-ctt8h\" (UID: \"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c\") " pod="openstack/dnsmasq-dns-77597f887-ctt8h" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.888126 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-dns-svc\") pod \"dnsmasq-dns-77597f887-ctt8h\" (UID: \"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c\") " pod="openstack/dnsmasq-dns-77597f887-ctt8h" Oct 06 21:47:35 crc kubenswrapper[5014]: I1006 21:47:35.903525 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4t8h8\" (UniqueName: 
\"kubernetes.io/projected/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-kube-api-access-4t8h8\") pod \"dnsmasq-dns-77597f887-ctt8h\" (UID: \"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c\") " pod="openstack/dnsmasq-dns-77597f887-ctt8h" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.022075 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77597f887-ctt8h" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.183942 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-644597f84c-gc96s"] Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.445863 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.447453 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.455301 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.455381 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.455645 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.456364 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.457409 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-wbhtg" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.458837 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.459968 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.481672 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.492243 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.492318 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.492350 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.492898 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4b977fc8-6c11-41e6-9500-f0da2d66aea1-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.493045 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.493170 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.493383 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4b977fc8-6c11-41e6-9500-f0da2d66aea1-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.493569 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.494981 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.495881 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.496245 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwpgx\" (UniqueName: \"kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-kube-api-access-wwpgx\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.597747 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.597800 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-erlang-cookie\") pod 
\"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.597823 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.597840 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4b977fc8-6c11-41e6-9500-f0da2d66aea1-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.597863 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.597886 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.597924 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4b977fc8-6c11-41e6-9500-f0da2d66aea1-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.597950 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.597967 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.597996 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.598018 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwpgx\" (UniqueName: \"kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-kube-api-access-wwpgx\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.598817 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.599821 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.600027 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.600718 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.600847 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.604405 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4b977fc8-6c11-41e6-9500-f0da2d66aea1-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.604718 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4b977fc8-6c11-41e6-9500-f0da2d66aea1-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.604901 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.604922 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.609784 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.616712 5014 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-wwpgx\" (UniqueName: \"kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-kube-api-access-wwpgx\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.632432 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") " pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.764818 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.768271 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.771597 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.771767 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.771984 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.772095 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.772195 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.772676 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-kcb4l" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.772797 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.773733 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.778695 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.801204 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.801275 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.801309 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.801330 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.801347 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qjbz\" (UniqueName: \"kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-kube-api-access-2qjbz\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.801367 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.801399 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.801427 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.801448 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.801469 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.801488 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.903531 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.903579 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.903602 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.903634 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qjbz\" (UniqueName: \"kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-kube-api-access-2qjbz\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.903659 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.903718 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.903751 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc 
kubenswrapper[5014]: I1006 21:47:36.903768 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.903789 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.903810 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.903841 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.904566 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.904772 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.905224 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.905532 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.906829 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.907749 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.909464 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.912404 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.923012 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qjbz\" (UniqueName: \"kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-kube-api-access-2qjbz\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.923494 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.925576 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:36 crc kubenswrapper[5014]: I1006 21:47:36.927090 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:37 crc kubenswrapper[5014]: I1006 21:47:37.098703 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:47:37 crc kubenswrapper[5014]: I1006 21:47:37.981261 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Oct 06 21:47:37 crc kubenswrapper[5014]: I1006 21:47:37.986576 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Oct 06 21:47:37 crc kubenswrapper[5014]: I1006 21:47:37.994414 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Oct 06 21:47:37 crc kubenswrapper[5014]: I1006 21:47:37.994923 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Oct 06 21:47:37 crc kubenswrapper[5014]: I1006 21:47:37.994962 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Oct 06 21:47:37 crc kubenswrapper[5014]: I1006 21:47:37.994648 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Oct 06 21:47:37 crc kubenswrapper[5014]: I1006 21:47:37.995173 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-8qr48" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.000536 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.007509 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.123907 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.124331 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.124425 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.124451 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkqk2\" (UniqueName: \"kubernetes.io/projected/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-kube-api-access-lkqk2\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.124504 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.124524 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" 
Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.124612 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-kolla-config\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.124719 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-config-data-default\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.124757 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-secrets\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.225936 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkqk2\" (UniqueName: \"kubernetes.io/projected/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-kube-api-access-lkqk2\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.226056 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.226092 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.226145 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-kolla-config\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.226195 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-config-data-default\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.226251 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-secrets\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.226296 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.226321 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.226392 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.226773 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.227060 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-kolla-config\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.227354 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.227512 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-config-data-default\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.227636 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.231282 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-secrets\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.231769 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 
21:47:38.242647 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.246729 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkqk2\" (UniqueName: \"kubernetes.io/projected/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-kube-api-access-lkqk2\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.260603 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " pod="openstack/openstack-galera-0" Oct 06 21:47:38 crc kubenswrapper[5014]: I1006 21:47:38.305716 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.371403 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.378244 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.383176 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.383386 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.384088 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-5sjbz" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.384138 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.384337 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.554916 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.554979 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.555013 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " 
pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.555032 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.555051 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.555086 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.555113 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3b18812d-9eec-4254-8633-b40f55244e47-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.555178 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.555205 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85hpl\" (UniqueName: \"kubernetes.io/projected/3b18812d-9eec-4254-8633-b40f55244e47-kube-api-access-85hpl\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.656723 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.656833 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3b18812d-9eec-4254-8633-b40f55244e47-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.656877 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: 
\"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.656917 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85hpl\" (UniqueName: \"kubernetes.io/projected/3b18812d-9eec-4254-8633-b40f55244e47-kube-api-access-85hpl\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.656974 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.657018 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.657061 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.657089 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.657114 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.657415 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3b18812d-9eec-4254-8633-b40f55244e47-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.657686 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.658100 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc 
kubenswrapper[5014]: I1006 21:47:39.658692 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.658889 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.663132 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.665572 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.670352 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.679281 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85hpl\" (UniqueName: \"kubernetes.io/projected/3b18812d-9eec-4254-8633-b40f55244e47-kube-api-access-85hpl\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.680190 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.697391 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.752122 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.753595 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.755390 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-c7vfr" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.756195 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.756573 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.767903 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.859266 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cd16c866-91b0-4261-a084-7a96ac597c04-kolla-config\") pod \"memcached-0\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " pod="openstack/memcached-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.859319 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4vcq\" (UniqueName: \"kubernetes.io/projected/cd16c866-91b0-4261-a084-7a96ac597c04-kube-api-access-g4vcq\") pod \"memcached-0\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " pod="openstack/memcached-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.859350 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd16c866-91b0-4261-a084-7a96ac597c04-memcached-tls-certs\") pod \"memcached-0\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " pod="openstack/memcached-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.859426 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd16c866-91b0-4261-a084-7a96ac597c04-combined-ca-bundle\") pod \"memcached-0\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " pod="openstack/memcached-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.859447 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cd16c866-91b0-4261-a084-7a96ac597c04-config-data\") pod \"memcached-0\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " pod="openstack/memcached-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.960566 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd16c866-91b0-4261-a084-7a96ac597c04-combined-ca-bundle\") pod \"memcached-0\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " pod="openstack/memcached-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.960855 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cd16c866-91b0-4261-a084-7a96ac597c04-config-data\") pod \"memcached-0\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " pod="openstack/memcached-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.960949 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/cd16c866-91b0-4261-a084-7a96ac597c04-kolla-config\") pod \"memcached-0\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " pod="openstack/memcached-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.961039 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4vcq\" (UniqueName: \"kubernetes.io/projected/cd16c866-91b0-4261-a084-7a96ac597c04-kube-api-access-g4vcq\") pod \"memcached-0\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " pod="openstack/memcached-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.961128 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd16c866-91b0-4261-a084-7a96ac597c04-memcached-tls-certs\") pod \"memcached-0\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " pod="openstack/memcached-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.964843 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cd16c866-91b0-4261-a084-7a96ac597c04-config-data\") pod \"memcached-0\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " pod="openstack/memcached-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.965695 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cd16c866-91b0-4261-a084-7a96ac597c04-kolla-config\") pod \"memcached-0\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " pod="openstack/memcached-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.967850 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd16c866-91b0-4261-a084-7a96ac597c04-memcached-tls-certs\") pod \"memcached-0\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " pod="openstack/memcached-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.968390 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd16c866-91b0-4261-a084-7a96ac597c04-combined-ca-bundle\") pod \"memcached-0\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " pod="openstack/memcached-0" Oct 06 21:47:39 crc kubenswrapper[5014]: I1006 21:47:39.981538 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4vcq\" (UniqueName: \"kubernetes.io/projected/cd16c866-91b0-4261-a084-7a96ac597c04-kube-api-access-g4vcq\") pod \"memcached-0\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " pod="openstack/memcached-0" Oct 06 21:47:40 crc kubenswrapper[5014]: I1006 21:47:40.089686 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Oct 06 21:47:40 crc kubenswrapper[5014]: I1006 21:47:40.116717 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-644597f84c-gc96s" event={"ID":"5581f1cf-4f5f-4042-a04b-22a966f20b23","Type":"ContainerStarted","Data":"42337f47576fab0d858cf0c2d4c85b206d511fd22ab1186abf34ecbc74685f4e"} Oct 06 21:47:41 crc kubenswrapper[5014]: I1006 21:47:41.290462 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Oct 06 21:47:41 crc kubenswrapper[5014]: I1006 21:47:41.291990 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 06 21:47:41 crc kubenswrapper[5014]: I1006 21:47:41.293881 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-spvjv" Oct 06 21:47:41 crc kubenswrapper[5014]: I1006 21:47:41.311789 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 06 21:47:41 crc kubenswrapper[5014]: I1006 21:47:41.384412 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqfmm\" (UniqueName: \"kubernetes.io/projected/81112a98-d817-4bf4-bb1e-288cb62e8577-kube-api-access-nqfmm\") pod \"kube-state-metrics-0\" (UID: \"81112a98-d817-4bf4-bb1e-288cb62e8577\") " pod="openstack/kube-state-metrics-0" Oct 06 21:47:41 crc kubenswrapper[5014]: I1006 21:47:41.485146 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqfmm\" (UniqueName: \"kubernetes.io/projected/81112a98-d817-4bf4-bb1e-288cb62e8577-kube-api-access-nqfmm\") pod \"kube-state-metrics-0\" (UID: \"81112a98-d817-4bf4-bb1e-288cb62e8577\") " pod="openstack/kube-state-metrics-0" Oct 06 21:47:41 crc kubenswrapper[5014]: I1006 21:47:41.504258 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqfmm\" (UniqueName: \"kubernetes.io/projected/81112a98-d817-4bf4-bb1e-288cb62e8577-kube-api-access-nqfmm\") pod \"kube-state-metrics-0\" (UID: \"81112a98-d817-4bf4-bb1e-288cb62e8577\") " pod="openstack/kube-state-metrics-0" Oct 06 21:47:41 crc kubenswrapper[5014]: I1006 21:47:41.612374 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.442250 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-f4vpp"] Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.443515 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.446546 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.447354 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-88pnq" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.449458 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.458531 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-f4vpp"] Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.470567 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-fwbdt"] Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.472780 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.548971 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-fwbdt"] Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.571575 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/74db136d-3445-4a7e-bcae-4645888ec806-ovn-controller-tls-certs\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.571641 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74db136d-3445-4a7e-bcae-4645888ec806-combined-ca-bundle\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.571664 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hl7qk\" (UniqueName: \"kubernetes.io/projected/74db136d-3445-4a7e-bcae-4645888ec806-kube-api-access-hl7qk\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.571691 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvq6s\" (UniqueName: \"kubernetes.io/projected/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-kube-api-access-vvq6s\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.571725 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-etc-ovs\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.571744 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-log\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.571781 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-lib\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.571797 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/74db136d-3445-4a7e-bcae-4645888ec806-scripts\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.572030 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-run-ovn\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.572068 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-scripts\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.572097 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-run\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.572125 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-log-ovn\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.572139 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-run\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.676565 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-run-ovn\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.676639 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-scripts\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.676670 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-run\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.676697 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-log-ovn\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.676712 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-run\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " 
pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.676736 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/74db136d-3445-4a7e-bcae-4645888ec806-ovn-controller-tls-certs\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.676760 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74db136d-3445-4a7e-bcae-4645888ec806-combined-ca-bundle\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.676781 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hl7qk\" (UniqueName: \"kubernetes.io/projected/74db136d-3445-4a7e-bcae-4645888ec806-kube-api-access-hl7qk\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.676809 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvq6s\" (UniqueName: \"kubernetes.io/projected/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-kube-api-access-vvq6s\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.676835 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-etc-ovs\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.676851 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-log\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.676905 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-lib\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.676922 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/74db136d-3445-4a7e-bcae-4645888ec806-scripts\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.677422 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-run\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.677577 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" 
(UniqueName: \"kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-run-ovn\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.679347 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/74db136d-3445-4a7e-bcae-4645888ec806-scripts\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.679502 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-log-ovn\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.681860 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-log\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.682076 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-etc-ovs\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.684566 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-lib\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.684901 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-run\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.685194 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-scripts\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.687634 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/74db136d-3445-4a7e-bcae-4645888ec806-ovn-controller-tls-certs\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.687812 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74db136d-3445-4a7e-bcae-4645888ec806-combined-ca-bundle\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.696794 5014 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hl7qk\" (UniqueName: \"kubernetes.io/projected/74db136d-3445-4a7e-bcae-4645888ec806-kube-api-access-hl7qk\") pod \"ovn-controller-f4vpp\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.697250 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvq6s\" (UniqueName: \"kubernetes.io/projected/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-kube-api-access-vvq6s\") pod \"ovn-controller-ovs-fwbdt\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.770461 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.804931 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.806790 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.815440 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-pr7pm" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.816315 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.816336 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.816801 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.816382 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.816430 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.817362 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.880725 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c7d1001f-b56b-4d52-88bc-4f23831c3509-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.880784 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.880815 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7d1001f-b56b-4d52-88bc-4f23831c3509-config\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.881026 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cw7b\" (UniqueName: \"kubernetes.io/projected/c7d1001f-b56b-4d52-88bc-4f23831c3509-kube-api-access-2cw7b\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.881050 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c7d1001f-b56b-4d52-88bc-4f23831c3509-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.881097 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.881125 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.881148 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: 
\"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.982336 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.982393 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.982438 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.982497 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c7d1001f-b56b-4d52-88bc-4f23831c3509-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.982524 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.982567 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7d1001f-b56b-4d52-88bc-4f23831c3509-config\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.982585 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cw7b\" (UniqueName: \"kubernetes.io/projected/c7d1001f-b56b-4d52-88bc-4f23831c3509-kube-api-access-2cw7b\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.982605 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c7d1001f-b56b-4d52-88bc-4f23831c3509-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.983196 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c7d1001f-b56b-4d52-88bc-4f23831c3509-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.983268 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.987809 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c7d1001f-b56b-4d52-88bc-4f23831c3509-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.990399 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.994075 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:45 crc kubenswrapper[5014]: I1006 21:47:45.994182 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7d1001f-b56b-4d52-88bc-4f23831c3509-config\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:46 crc kubenswrapper[5014]: I1006 21:47:45.997720 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:46 crc kubenswrapper[5014]: I1006 21:47:46.000059 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cw7b\" (UniqueName: \"kubernetes.io/projected/c7d1001f-b56b-4d52-88bc-4f23831c3509-kube-api-access-2cw7b\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:46 crc kubenswrapper[5014]: I1006 21:47:46.021082 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:46 crc kubenswrapper[5014]: I1006 21:47:46.150303 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 06 21:47:47 crc kubenswrapper[5014]: E1006 21:47:47.864083 5014 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df" Oct 06 21:47:47 crc kubenswrapper[5014]: E1006 21:47:47.864650 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gprw2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-758b79db4c-f8s4q_openstack(a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 06 21:47:47 crc kubenswrapper[5014]: E1006 21:47:47.867155 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" podUID="a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240" Oct 06 21:47:47 crc kubenswrapper[5014]: E1006 21:47:47.888276 5014 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df" Oct 06 21:47:47 
crc kubenswrapper[5014]: E1006 21:47:47.888465 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rrb9m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-7bfcb9d745-wzd79_openstack(ccc4931c-f91c-460e-961d-074196743645): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 06 21:47:47 crc kubenswrapper[5014]: E1006 21:47:47.889823 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-7bfcb9d745-wzd79" podUID="ccc4931c-f91c-460e-961d-074196743645" Oct 06 21:47:48 crc kubenswrapper[5014]: I1006 21:47:48.652415 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bfcb9d745-wzd79" Oct 06 21:47:48 crc kubenswrapper[5014]: I1006 21:47:48.733560 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrb9m\" (UniqueName: \"kubernetes.io/projected/ccc4931c-f91c-460e-961d-074196743645-kube-api-access-rrb9m\") pod \"ccc4931c-f91c-460e-961d-074196743645\" (UID: \"ccc4931c-f91c-460e-961d-074196743645\") " Oct 06 21:47:48 crc kubenswrapper[5014]: I1006 21:47:48.733741 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccc4931c-f91c-460e-961d-074196743645-config\") pod \"ccc4931c-f91c-460e-961d-074196743645\" (UID: \"ccc4931c-f91c-460e-961d-074196743645\") " Oct 06 21:47:48 crc kubenswrapper[5014]: I1006 21:47:48.734542 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccc4931c-f91c-460e-961d-074196743645-config" (OuterVolumeSpecName: "config") pod "ccc4931c-f91c-460e-961d-074196743645" (UID: "ccc4931c-f91c-460e-961d-074196743645"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:47:48 crc kubenswrapper[5014]: I1006 21:47:48.739153 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccc4931c-f91c-460e-961d-074196743645-kube-api-access-rrb9m" (OuterVolumeSpecName: "kube-api-access-rrb9m") pod "ccc4931c-f91c-460e-961d-074196743645" (UID: "ccc4931c-f91c-460e-961d-074196743645"). InnerVolumeSpecName "kube-api-access-rrb9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:47:48 crc kubenswrapper[5014]: I1006 21:47:48.836457 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrb9m\" (UniqueName: \"kubernetes.io/projected/ccc4931c-f91c-460e-961d-074196743645-kube-api-access-rrb9m\") on node \"crc\" DevicePath \"\"" Oct 06 21:47:48 crc kubenswrapper[5014]: I1006 21:47:48.836494 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccc4931c-f91c-460e-961d-074196743645-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:47:48 crc kubenswrapper[5014]: I1006 21:47:48.945226 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.003577 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.024225 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77597f887-ctt8h"] Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.039322 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-config\") pod \"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240\" (UID: \"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240\") " Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.039409 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gprw2\" (UniqueName: \"kubernetes.io/projected/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-kube-api-access-gprw2\") pod \"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240\" (UID: \"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240\") " Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.039445 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-dns-svc\") pod \"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240\" (UID: \"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240\") " Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.040768 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-config" (OuterVolumeSpecName: "config") pod "a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240" (UID: "a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.041893 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240" (UID: "a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.049882 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-kube-api-access-gprw2" (OuterVolumeSpecName: "kube-api-access-gprw2") pod "a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240" (UID: "a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240"). InnerVolumeSpecName "kube-api-access-gprw2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.092803 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.095068 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.097491 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.098177 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.099731 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-xhsx8" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.107748 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.122856 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 06 21:47:49 crc kubenswrapper[5014]: W1006 21:47:49.140306 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81112a98_d817_4bf4_bb1e_288cb62e8577.slice/crio-f1220b7365b3ea434ec51cf42b3defeecd4c4fdd220439a9793ea3b746225024 WatchSource:0}: Error finding container f1220b7365b3ea434ec51cf42b3defeecd4c4fdd220439a9793ea3b746225024: Status 404 returned error can't find the container with id f1220b7365b3ea434ec51cf42b3defeecd4c4fdd220439a9793ea3b746225024 Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.141649 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.141948 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gprw2\" (UniqueName: \"kubernetes.io/projected/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-kube-api-access-gprw2\") on node \"crc\" DevicePath \"\"" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.141963 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.141785 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 06 21:47:49 crc kubenswrapper[5014]: W1006 21:47:49.158572 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b977fc8_6c11_41e6_9500_f0da2d66aea1.slice/crio-6cafdd1c4b027c8666bbc2f9206f42e474fdfb5f4dc256a546e92a716c66b2c9 WatchSource:0}: Error finding container 6cafdd1c4b027c8666bbc2f9206f42e474fdfb5f4dc256a546e92a716c66b2c9: Status 404 returned error can't find the container with id 6cafdd1c4b027c8666bbc2f9206f42e474fdfb5f4dc256a546e92a716c66b2c9 Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.178486 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.195772 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.204612 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 06 21:47:49 crc kubenswrapper[5014]: W1006 21:47:49.209912 5014 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc7d1001f_b56b_4d52_88bc_4f23831c3509.slice/crio-585b70c5b2c537198fef8bf78d157546bd8594061a8f3754f0065278ca9e18d6 WatchSource:0}: Error finding container 585b70c5b2c537198fef8bf78d157546bd8594061a8f3754f0065278ca9e18d6: Status 404 returned error can't find the container with id 585b70c5b2c537198fef8bf78d157546bd8594061a8f3754f0065278ca9e18d6 Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.211648 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"cd16c866-91b0-4261-a084-7a96ac597c04","Type":"ContainerStarted","Data":"020ebec827fa632394d91894455ff61a17986eaf8dbf508e23037a819940b99f"} Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.213718 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.215753 5014 generic.go:334] "Generic (PLEG): container finished" podID="5581f1cf-4f5f-4042-a04b-22a966f20b23" containerID="7ce9edc49743e08bed8809b10f9979dcf1260da3bcb647d3a6209f71507e3d1e" exitCode=0 Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.215840 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-644597f84c-gc96s" event={"ID":"5581f1cf-4f5f-4042-a04b-22a966f20b23","Type":"ContainerDied","Data":"7ce9edc49743e08bed8809b10f9979dcf1260da3bcb647d3a6209f71507e3d1e"} Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.218232 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-f4vpp"] Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.220663 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f4vpp" event={"ID":"74db136d-3445-4a7e-bcae-4645888ec806","Type":"ContainerStarted","Data":"c04b471884820c8a60d36e31f1b5f50ec48bb852b1521f331933734ff789c995"} Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.221945 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3b18812d-9eec-4254-8633-b40f55244e47","Type":"ContainerStarted","Data":"08394ba6b24bcdee49c1708e48bd4765cfbaf33846d9caad10f595fabf98baf2"} Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.223870 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4b977fc8-6c11-41e6-9500-f0da2d66aea1","Type":"ContainerStarted","Data":"6cafdd1c4b027c8666bbc2f9206f42e474fdfb5f4dc256a546e92a716c66b2c9"} Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.225926 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bfcb9d745-wzd79" event={"ID":"ccc4931c-f91c-460e-961d-074196743645","Type":"ContainerDied","Data":"065246d3b68b542c9286a41572249397d16be2f56b52599e45569840924bc07b"} Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.226019 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bfcb9d745-wzd79" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.228147 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.238950 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c8f59d7d-f71b-46b0-bd32-476a2517a3b6","Type":"ContainerStarted","Data":"6f1e338aff0051925912772b4d9266eeb391a4d547b790e5d76d0bc406be12bb"} Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.243170 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.243218 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1132a0d0-bc9b-430d-a89e-33455c763b3c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.243239 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6d994\" (UniqueName: \"kubernetes.io/projected/1132a0d0-bc9b-430d-a89e-33455c763b3c-kube-api-access-6d994\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.243264 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1132a0d0-bc9b-430d-a89e-33455c763b3c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.243333 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.243363 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1132a0d0-bc9b-430d-a89e-33455c763b3c-config\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.243398 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.243413 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: 
\"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.243821 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" event={"ID":"a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240","Type":"ContainerDied","Data":"3f1db10e0463d91345cb8499a39cc60bf86ad7a71a3206848a777c8aa552197d"} Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.243825 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-758b79db4c-f8s4q" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.245108 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77597f887-ctt8h" event={"ID":"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c","Type":"ContainerStarted","Data":"e7d6054aae787b24008aee6ea0ded3e8c54951463a1336d42d43058df185f36c"} Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.247845 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"81112a98-d817-4bf4-bb1e-288cb62e8577","Type":"ContainerStarted","Data":"f1220b7365b3ea434ec51cf42b3defeecd4c4fdd220439a9793ea3b746225024"} Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.250460 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd","Type":"ContainerStarted","Data":"1b002e4dd7c369d9795a24bfa89f81639f0653181764f6199e6579c4ea3af0e3"} Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.281371 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-fwbdt"] Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.303456 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bfcb9d745-wzd79"] Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.309448 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bfcb9d745-wzd79"] Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.321679 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-758b79db4c-f8s4q"] Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.323810 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-758b79db4c-f8s4q"] Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.345089 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1132a0d0-bc9b-430d-a89e-33455c763b3c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.345139 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6d994\" (UniqueName: \"kubernetes.io/projected/1132a0d0-bc9b-430d-a89e-33455c763b3c-kube-api-access-6d994\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.345163 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1132a0d0-bc9b-430d-a89e-33455c763b3c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.345266 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.345301 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1132a0d0-bc9b-430d-a89e-33455c763b3c-config\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.345332 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.345352 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.345377 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.346767 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.352548 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1132a0d0-bc9b-430d-a89e-33455c763b3c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.352765 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1132a0d0-bc9b-430d-a89e-33455c763b3c-config\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.354996 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.355508 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0" Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 
21:47:49.355741 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1132a0d0-bc9b-430d-a89e-33455c763b3c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0"
Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.362491 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0"
Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.376120 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6d994\" (UniqueName: \"kubernetes.io/projected/1132a0d0-bc9b-430d-a89e-33455c763b3c-kube-api-access-6d994\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0"
Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.388126 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " pod="openstack/ovsdbserver-sb-0"
Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.473088 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.506930 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240" path="/var/lib/kubelet/pods/a7ea2d5f-3eef-45d6-9c8d-5dc2e5ddd240/volumes"
Oct 06 21:47:49 crc kubenswrapper[5014]: I1006 21:47:49.507829 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccc4931c-f91c-460e-961d-074196743645" path="/var/lib/kubelet/pods/ccc4931c-f91c-460e-961d-074196743645/volumes"
Oct 06 21:47:49 crc kubenswrapper[5014]: E1006 21:47:49.643916 5014 log.go:32] "CreateContainer in sandbox from runtime service failed" err=<
Oct 06 21:47:49 crc kubenswrapper[5014]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/5581f1cf-4f5f-4042-a04b-22a966f20b23/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory
Oct 06 21:47:49 crc kubenswrapper[5014]: > podSandboxID="42337f47576fab0d858cf0c2d4c85b206d511fd22ab1186abf34ecbc74685f4e"
Oct 06 21:47:49 crc kubenswrapper[5014]: E1006 21:47:49.644093 5014 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Oct 06 21:47:49 crc kubenswrapper[5014]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6kvln,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-644597f84c-gc96s_openstack(5581f1cf-4f5f-4042-a04b-22a966f20b23): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/5581f1cf-4f5f-4042-a04b-22a966f20b23/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory
Oct 06 21:47:49 crc kubenswrapper[5014]: > logger="UnhandledError"
Oct 06 21:47:49 crc kubenswrapper[5014]: E1006 21:47:49.646054 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/5581f1cf-4f5f-4042-a04b-22a966f20b23/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-644597f84c-gc96s" podUID="5581f1cf-4f5f-4042-a04b-22a966f20b23"
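
The two E1006 entries above carry the full Container spec that failed. Re-expressed with k8s.io/api types — a sketch grounded only in the fields visible in the log dump, not in the OpenStack operator's source — the mount section that triggered the error is:

    // Sketch only: the dnsmasq-dns VolumeMounts exactly as dumped in the
    // "Unhandled Error" entry above, rewritten with k8s.io/api types for
    // readability. Field values come from the log, not from operator code.
    package sketch

    import corev1 "k8s.io/api/core/v1"

    var dnsmasqDNSMounts = []corev1.VolumeMount{
        {Name: "config", ReadOnly: true, MountPath: "/etc/dnsmasq.d/config.cfg", SubPath: "dns"},
        // This is the subPath the runtime could not bind-mount: the kubelet
        // prepares /var/lib/kubelet/pods/<uid>/volume-subpaths/dns-svc/dnsmasq-dns/1
        // as the source, and CRI-O mounts it onto etc/dnsmasq.d/hosts/dns-svc
        // relative to the container rootfs.
        {Name: "dns-svc", ReadOnly: true, MountPath: "/etc/dnsmasq.d/hosts/dns-svc", SubPath: "dns-svc"},
        {Name: "kube-api-access-6kvln", ReadOnly: true, MountPath: "/var/run/secrets/kubernetes.io/serviceaccount"},
    }

The "No such file or directory" suggests either the prepared subpath source or the in-rootfs target was absent at mount time; the same pod's dnsmasq-dns container does start a few seconds later (ContainerStarted b20be13d9... at 21:47:57 below), so the failure was transient.
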
event={"ID":"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345","Type":"ContainerStarted","Data":"9c266677ca4f4946e15eb4c16ea59938797fcc27820ff4d9ca1f4c4e6c1b8262"} Oct 06 21:47:50 crc kubenswrapper[5014]: I1006 21:47:50.261961 5014 generic.go:334] "Generic (PLEG): container finished" podID="9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c" containerID="ec3f13e3802c96f3ce80b92ccfb25ad8569b49912dcf79ff518f1cede75a6345" exitCode=0 Oct 06 21:47:50 crc kubenswrapper[5014]: I1006 21:47:50.262053 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77597f887-ctt8h" event={"ID":"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c","Type":"ContainerDied","Data":"ec3f13e3802c96f3ce80b92ccfb25ad8569b49912dcf79ff518f1cede75a6345"} Oct 06 21:47:50 crc kubenswrapper[5014]: I1006 21:47:50.266792 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1132a0d0-bc9b-430d-a89e-33455c763b3c","Type":"ContainerStarted","Data":"688f79fff94f7065f9ed863518dfb80e9ef98e776cb4324a339f69c71e6bcb3a"} Oct 06 21:47:50 crc kubenswrapper[5014]: I1006 21:47:50.268917 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c7d1001f-b56b-4d52-88bc-4f23831c3509","Type":"ContainerStarted","Data":"585b70c5b2c537198fef8bf78d157546bd8594061a8f3754f0065278ca9e18d6"} Oct 06 21:47:51 crc kubenswrapper[5014]: I1006 21:47:51.735280 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 21:47:51 crc kubenswrapper[5014]: I1006 21:47:51.735875 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 21:47:51 crc kubenswrapper[5014]: I1006 21:47:51.735921 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:47:51 crc kubenswrapper[5014]: I1006 21:47:51.736530 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"067aa2b7990476eda8f037f84b90fe4366def589071101170672b653a6809121"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 06 21:47:51 crc kubenswrapper[5014]: I1006 21:47:51.736579 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://067aa2b7990476eda8f037f84b90fe4366def589071101170672b653a6809121" gracePeriod=600 Oct 06 21:47:52 crc kubenswrapper[5014]: I1006 21:47:52.289792 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="067aa2b7990476eda8f037f84b90fe4366def589071101170672b653a6809121" exitCode=0 Oct 06 21:47:52 crc kubenswrapper[5014]: I1006 21:47:52.289847 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" 
event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"067aa2b7990476eda8f037f84b90fe4366def589071101170672b653a6809121"} Oct 06 21:47:52 crc kubenswrapper[5014]: I1006 21:47:52.289924 5014 scope.go:117] "RemoveContainer" containerID="5d102177c0ce6793970da1b086882b162bbee28f8415589d9583b1584188a7df" Oct 06 21:47:57 crc kubenswrapper[5014]: I1006 21:47:57.346521 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3b18812d-9eec-4254-8633-b40f55244e47","Type":"ContainerStarted","Data":"6437ea266193af7f5429b5e22c496e67c18b4a8bbe1772f6583ef433e1d820e1"} Oct 06 21:47:57 crc kubenswrapper[5014]: I1006 21:47:57.349473 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-644597f84c-gc96s" event={"ID":"5581f1cf-4f5f-4042-a04b-22a966f20b23","Type":"ContainerStarted","Data":"b20be13d92f6a6c922bf691e28efe87c3aba061b6e9f183ecb1fe75daf0aa32a"} Oct 06 21:47:57 crc kubenswrapper[5014]: I1006 21:47:57.349755 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-644597f84c-gc96s" Oct 06 21:47:57 crc kubenswrapper[5014]: I1006 21:47:57.353666 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c7d1001f-b56b-4d52-88bc-4f23831c3509","Type":"ContainerStarted","Data":"9bdf32744d43a96f7be12f93ad6660d8bbb3d324702e5bee26e36c2cea2725ce"} Oct 06 21:47:57 crc kubenswrapper[5014]: I1006 21:47:57.357196 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"26989bcf2919a89c6621b6af8723e17c2272f8a4a0f8b3ba02a253747e518022"} Oct 06 21:47:57 crc kubenswrapper[5014]: I1006 21:47:57.359889 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"81112a98-d817-4bf4-bb1e-288cb62e8577","Type":"ContainerStarted","Data":"96444f2a32fb59138cfc1e622d46a7508491a379c1bf7d6e3ed57a0b844c47c6"} Oct 06 21:47:57 crc kubenswrapper[5014]: I1006 21:47:57.360022 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Oct 06 21:47:57 crc kubenswrapper[5014]: I1006 21:47:57.362529 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd","Type":"ContainerStarted","Data":"1251818fc119fa5d31c10f6c1c00d043022d832df9c4ffca7ca60f60fea4e5af"} Oct 06 21:47:57 crc kubenswrapper[5014]: I1006 21:47:57.363984 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"cd16c866-91b0-4261-a084-7a96ac597c04","Type":"ContainerStarted","Data":"cdc30334797cf0d0a1abb5ab3b3e87dbac8996fc837919c9242d311431f403b2"} Oct 06 21:47:57 crc kubenswrapper[5014]: I1006 21:47:57.364095 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Oct 06 21:47:57 crc kubenswrapper[5014]: I1006 21:47:57.368377 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1132a0d0-bc9b-430d-a89e-33455c763b3c","Type":"ContainerStarted","Data":"a6e535773cd137a5bab0a6870ca512a9e234a397494ec79686683eaaade66b11"} Oct 06 21:47:57 crc kubenswrapper[5014]: I1006 21:47:57.432908 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-644597f84c-gc96s" podStartSLOduration=14.08283254 
podStartE2EDuration="22.432885389s" podCreationTimestamp="2025-10-06 21:47:35 +0000 UTC" firstStartedPulling="2025-10-06 21:47:39.706604779 +0000 UTC m=+1004.999641513" lastFinishedPulling="2025-10-06 21:47:48.056657628 +0000 UTC m=+1013.349694362" observedRunningTime="2025-10-06 21:47:57.42881839 +0000 UTC m=+1022.721855124" watchObservedRunningTime="2025-10-06 21:47:57.432885389 +0000 UTC m=+1022.725922133" Oct 06 21:47:57 crc kubenswrapper[5014]: I1006 21:47:57.455858 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=8.923440364 podStartE2EDuration="16.455829168s" podCreationTimestamp="2025-10-06 21:47:41 +0000 UTC" firstStartedPulling="2025-10-06 21:47:49.15683468 +0000 UTC m=+1014.449871404" lastFinishedPulling="2025-10-06 21:47:56.689223474 +0000 UTC m=+1021.982260208" observedRunningTime="2025-10-06 21:47:57.452313016 +0000 UTC m=+1022.745349750" watchObservedRunningTime="2025-10-06 21:47:57.455829168 +0000 UTC m=+1022.748865922" Oct 06 21:47:57 crc kubenswrapper[5014]: I1006 21:47:57.500685 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=11.329245185 podStartE2EDuration="18.50065712s" podCreationTimestamp="2025-10-06 21:47:39 +0000 UTC" firstStartedPulling="2025-10-06 21:47:49.163258654 +0000 UTC m=+1014.456295388" lastFinishedPulling="2025-10-06 21:47:56.334670569 +0000 UTC m=+1021.627707323" observedRunningTime="2025-10-06 21:47:57.497273844 +0000 UTC m=+1022.790310578" watchObservedRunningTime="2025-10-06 21:47:57.50065712 +0000 UTC m=+1022.793693864" Oct 06 21:47:58 crc kubenswrapper[5014]: I1006 21:47:58.382190 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c8f59d7d-f71b-46b0-bd32-476a2517a3b6","Type":"ContainerStarted","Data":"09d7c2ebf6323f6087d0027d356d50b1abd8029b6b521b27fd0322faa1293a5d"} Oct 06 21:47:58 crc kubenswrapper[5014]: I1006 21:47:58.388517 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77597f887-ctt8h" event={"ID":"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c","Type":"ContainerStarted","Data":"456dd0ade0f127576078aaf7d121748e52e5537bf83be4051acde44626b46020"} Oct 06 21:47:58 crc kubenswrapper[5014]: I1006 21:47:58.388808 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77597f887-ctt8h" Oct 06 21:47:58 crc kubenswrapper[5014]: I1006 21:47:58.391226 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f4vpp" event={"ID":"74db136d-3445-4a7e-bcae-4645888ec806","Type":"ContainerStarted","Data":"f4279eb17667d99549974582ac2811d308ce6f778194f59eb6a9ca77feb9eca8"} Oct 06 21:47:58 crc kubenswrapper[5014]: I1006 21:47:58.391597 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-f4vpp" Oct 06 21:47:58 crc kubenswrapper[5014]: I1006 21:47:58.394143 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4b977fc8-6c11-41e6-9500-f0da2d66aea1","Type":"ContainerStarted","Data":"166b1c2d8ef910ca7a9787911a267a186c4cd418c36eafd7d791b96769ac9953"} Oct 06 21:47:58 crc kubenswrapper[5014]: I1006 21:47:58.396694 5014 generic.go:334] "Generic (PLEG): container finished" podID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerID="f6e5006398e333de1633332d6b3c0f4a9fbd91041ff9cc5f2132f9ac4ddff2c7" exitCode=0 Oct 06 21:47:58 crc kubenswrapper[5014]: I1006 21:47:58.398074 5014 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-fwbdt" event={"ID":"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345","Type":"ContainerDied","Data":"f6e5006398e333de1633332d6b3c0f4a9fbd91041ff9cc5f2132f9ac4ddff2c7"} Oct 06 21:47:58 crc kubenswrapper[5014]: I1006 21:47:58.451389 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77597f887-ctt8h" podStartSLOduration=23.451363698 podStartE2EDuration="23.451363698s" podCreationTimestamp="2025-10-06 21:47:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:47:58.446169263 +0000 UTC m=+1023.739206007" watchObservedRunningTime="2025-10-06 21:47:58.451363698 +0000 UTC m=+1023.744400442" Oct 06 21:47:58 crc kubenswrapper[5014]: I1006 21:47:58.504406 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-f4vpp" podStartSLOduration=6.00487519 podStartE2EDuration="13.504384071s" podCreationTimestamp="2025-10-06 21:47:45 +0000 UTC" firstStartedPulling="2025-10-06 21:47:49.187331137 +0000 UTC m=+1014.480367871" lastFinishedPulling="2025-10-06 21:47:56.686840018 +0000 UTC m=+1021.979876752" observedRunningTime="2025-10-06 21:47:58.495416787 +0000 UTC m=+1023.788453541" watchObservedRunningTime="2025-10-06 21:47:58.504384071 +0000 UTC m=+1023.797420825" Oct 06 21:47:59 crc kubenswrapper[5014]: I1006 21:47:59.409748 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-fwbdt" event={"ID":"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345","Type":"ContainerStarted","Data":"54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c"} Oct 06 21:47:59 crc kubenswrapper[5014]: I1006 21:47:59.410170 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-fwbdt" event={"ID":"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345","Type":"ContainerStarted","Data":"52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc"} Oct 06 21:47:59 crc kubenswrapper[5014]: I1006 21:47:59.410216 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:59 crc kubenswrapper[5014]: I1006 21:47:59.410237 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:47:59 crc kubenswrapper[5014]: I1006 21:47:59.435807 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-fwbdt" podStartSLOduration=7.062345478 podStartE2EDuration="14.435776046s" podCreationTimestamp="2025-10-06 21:47:45 +0000 UTC" firstStartedPulling="2025-10-06 21:47:49.302367859 +0000 UTC m=+1014.595404593" lastFinishedPulling="2025-10-06 21:47:56.675798427 +0000 UTC m=+1021.968835161" observedRunningTime="2025-10-06 21:47:59.428750963 +0000 UTC m=+1024.721787707" watchObservedRunningTime="2025-10-06 21:47:59.435776046 +0000 UTC m=+1024.728812800" Oct 06 21:48:01 crc kubenswrapper[5014]: I1006 21:48:01.432550 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1132a0d0-bc9b-430d-a89e-33455c763b3c","Type":"ContainerStarted","Data":"c8aaf100576649e3e25587bb86a9bf5da8f2697e6e40a96847857214fba91a73"} Oct 06 21:48:01 crc kubenswrapper[5014]: I1006 21:48:01.438031 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" 
event={"ID":"c7d1001f-b56b-4d52-88bc-4f23831c3509","Type":"ContainerStarted","Data":"19f11c3ccbb300a39fa2c1f9012ea3f2ed6574d0a166bcfbc5e489893f193296"} Oct 06 21:48:01 crc kubenswrapper[5014]: I1006 21:48:01.469981 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=2.952361632 podStartE2EDuration="13.469959595s" podCreationTimestamp="2025-10-06 21:47:48 +0000 UTC" firstStartedPulling="2025-10-06 21:47:50.245048761 +0000 UTC m=+1015.538085505" lastFinishedPulling="2025-10-06 21:48:00.762646694 +0000 UTC m=+1026.055683468" observedRunningTime="2025-10-06 21:48:01.462674844 +0000 UTC m=+1026.755711608" watchObservedRunningTime="2025-10-06 21:48:01.469959595 +0000 UTC m=+1026.762996349" Oct 06 21:48:01 crc kubenswrapper[5014]: I1006 21:48:01.473196 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Oct 06 21:48:01 crc kubenswrapper[5014]: I1006 21:48:01.498341 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=6.011600151 podStartE2EDuration="17.498316666s" podCreationTimestamp="2025-10-06 21:47:44 +0000 UTC" firstStartedPulling="2025-10-06 21:47:49.284884873 +0000 UTC m=+1014.577921607" lastFinishedPulling="2025-10-06 21:48:00.771601388 +0000 UTC m=+1026.064638122" observedRunningTime="2025-10-06 21:48:01.497436848 +0000 UTC m=+1026.790473592" watchObservedRunningTime="2025-10-06 21:48:01.498316666 +0000 UTC m=+1026.791353420" Oct 06 21:48:01 crc kubenswrapper[5014]: I1006 21:48:01.538068 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.452357 5014 generic.go:334] "Generic (PLEG): container finished" podID="3b18812d-9eec-4254-8633-b40f55244e47" containerID="6437ea266193af7f5429b5e22c496e67c18b4a8bbe1772f6583ef433e1d820e1" exitCode=0 Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.452464 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3b18812d-9eec-4254-8633-b40f55244e47","Type":"ContainerDied","Data":"6437ea266193af7f5429b5e22c496e67c18b4a8bbe1772f6583ef433e1d820e1"} Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.456859 5014 generic.go:334] "Generic (PLEG): container finished" podID="2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" containerID="1251818fc119fa5d31c10f6c1c00d043022d832df9c4ffca7ca60f60fea4e5af" exitCode=0 Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.457000 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd","Type":"ContainerDied","Data":"1251818fc119fa5d31c10f6c1c00d043022d832df9c4ffca7ca60f60fea4e5af"} Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.457932 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.535861 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.825189 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-644597f84c-gc96s"] Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.825746 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-644597f84c-gc96s" 
podUID="5581f1cf-4f5f-4042-a04b-22a966f20b23" containerName="dnsmasq-dns" containerID="cri-o://b20be13d92f6a6c922bf691e28efe87c3aba061b6e9f183ecb1fe75daf0aa32a" gracePeriod=10 Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.827779 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-644597f84c-gc96s" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.852826 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54c9499b4f-pgvf8"] Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.854232 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.858740 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-9zq5k"] Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.859543 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.862195 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.863705 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.872407 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54c9499b4f-pgvf8"] Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.879678 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-9zq5k"] Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.991535 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d674559-08b5-41c9-8783-a5e42504fb3e-config\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.991593 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/0d674559-08b5-41c9-8783-a5e42504fb3e-ovs-rundir\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.991695 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-dns-svc\") pod \"dnsmasq-dns-54c9499b4f-pgvf8\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.991741 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d674559-08b5-41c9-8783-a5e42504fb3e-combined-ca-bundle\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.991781 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qkr9\" (UniqueName: 
\"kubernetes.io/projected/ee1f413c-b61e-4445-9412-bf1c45269ff1-kube-api-access-9qkr9\") pod \"dnsmasq-dns-54c9499b4f-pgvf8\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.991797 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-ovsdbserver-sb\") pod \"dnsmasq-dns-54c9499b4f-pgvf8\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.991819 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0d674559-08b5-41c9-8783-a5e42504fb3e-ovn-rundir\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.991843 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d674559-08b5-41c9-8783-a5e42504fb3e-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.991862 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-config\") pod \"dnsmasq-dns-54c9499b4f-pgvf8\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:02 crc kubenswrapper[5014]: I1006 21:48:02.991882 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8fjs\" (UniqueName: \"kubernetes.io/projected/0d674559-08b5-41c9-8783-a5e42504fb3e-kube-api-access-h8fjs\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.094242 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-dns-svc\") pod \"dnsmasq-dns-54c9499b4f-pgvf8\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.094731 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d674559-08b5-41c9-8783-a5e42504fb3e-combined-ca-bundle\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.094829 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qkr9\" (UniqueName: \"kubernetes.io/projected/ee1f413c-b61e-4445-9412-bf1c45269ff1-kube-api-access-9qkr9\") pod \"dnsmasq-dns-54c9499b4f-pgvf8\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.094850 5014 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-ovsdbserver-sb\") pod \"dnsmasq-dns-54c9499b4f-pgvf8\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.094881 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0d674559-08b5-41c9-8783-a5e42504fb3e-ovn-rundir\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.095607 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d674559-08b5-41c9-8783-a5e42504fb3e-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.095671 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-config\") pod \"dnsmasq-dns-54c9499b4f-pgvf8\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.095678 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-dns-svc\") pod \"dnsmasq-dns-54c9499b4f-pgvf8\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.095710 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8fjs\" (UniqueName: \"kubernetes.io/projected/0d674559-08b5-41c9-8783-a5e42504fb3e-kube-api-access-h8fjs\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.095894 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d674559-08b5-41c9-8783-a5e42504fb3e-config\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.095970 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/0d674559-08b5-41c9-8783-a5e42504fb3e-ovs-rundir\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.096102 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0d674559-08b5-41c9-8783-a5e42504fb3e-ovn-rundir\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.096125 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: 
\"kubernetes.io/host-path/0d674559-08b5-41c9-8783-a5e42504fb3e-ovs-rundir\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.096173 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-ovsdbserver-sb\") pod \"dnsmasq-dns-54c9499b4f-pgvf8\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.096952 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d674559-08b5-41c9-8783-a5e42504fb3e-config\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.101973 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-config\") pod \"dnsmasq-dns-54c9499b4f-pgvf8\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.107856 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d674559-08b5-41c9-8783-a5e42504fb3e-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.116373 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d674559-08b5-41c9-8783-a5e42504fb3e-combined-ca-bundle\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.120128 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qkr9\" (UniqueName: \"kubernetes.io/projected/ee1f413c-b61e-4445-9412-bf1c45269ff1-kube-api-access-9qkr9\") pod \"dnsmasq-dns-54c9499b4f-pgvf8\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.120917 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8fjs\" (UniqueName: \"kubernetes.io/projected/0d674559-08b5-41c9-8783-a5e42504fb3e-kube-api-access-h8fjs\") pod \"ovn-controller-metrics-9zq5k\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.207863 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-644597f84c-gc96s" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.217080 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.225380 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.227324 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77597f887-ctt8h"] Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.227521 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77597f887-ctt8h" podUID="9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c" containerName="dnsmasq-dns" containerID="cri-o://456dd0ade0f127576078aaf7d121748e52e5537bf83be4051acde44626b46020" gracePeriod=10 Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.230521 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77597f887-ctt8h" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.258484 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bc45f6dcf-ns4nt"] Oct 06 21:48:03 crc kubenswrapper[5014]: E1006 21:48:03.258814 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5581f1cf-4f5f-4042-a04b-22a966f20b23" containerName="init" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.258826 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5581f1cf-4f5f-4042-a04b-22a966f20b23" containerName="init" Oct 06 21:48:03 crc kubenswrapper[5014]: E1006 21:48:03.258845 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5581f1cf-4f5f-4042-a04b-22a966f20b23" containerName="dnsmasq-dns" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.258851 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5581f1cf-4f5f-4042-a04b-22a966f20b23" containerName="dnsmasq-dns" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.259002 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5581f1cf-4f5f-4042-a04b-22a966f20b23" containerName="dnsmasq-dns" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.259745 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.265405 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bc45f6dcf-ns4nt"] Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.267999 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.298313 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5581f1cf-4f5f-4042-a04b-22a966f20b23-dns-svc\") pod \"5581f1cf-4f5f-4042-a04b-22a966f20b23\" (UID: \"5581f1cf-4f5f-4042-a04b-22a966f20b23\") " Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.300139 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6kvln\" (UniqueName: \"kubernetes.io/projected/5581f1cf-4f5f-4042-a04b-22a966f20b23-kube-api-access-6kvln\") pod \"5581f1cf-4f5f-4042-a04b-22a966f20b23\" (UID: \"5581f1cf-4f5f-4042-a04b-22a966f20b23\") " Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.300940 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5581f1cf-4f5f-4042-a04b-22a966f20b23-config\") pod \"5581f1cf-4f5f-4042-a04b-22a966f20b23\" (UID: \"5581f1cf-4f5f-4042-a04b-22a966f20b23\") " Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.303731 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5581f1cf-4f5f-4042-a04b-22a966f20b23-kube-api-access-6kvln" (OuterVolumeSpecName: "kube-api-access-6kvln") pod "5581f1cf-4f5f-4042-a04b-22a966f20b23" (UID: "5581f1cf-4f5f-4042-a04b-22a966f20b23"). InnerVolumeSpecName "kube-api-access-6kvln". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.331912 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5581f1cf-4f5f-4042-a04b-22a966f20b23-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5581f1cf-4f5f-4042-a04b-22a966f20b23" (UID: "5581f1cf-4f5f-4042-a04b-22a966f20b23"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.358591 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5581f1cf-4f5f-4042-a04b-22a966f20b23-config" (OuterVolumeSpecName: "config") pod "5581f1cf-4f5f-4042-a04b-22a966f20b23" (UID: "5581f1cf-4f5f-4042-a04b-22a966f20b23"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.402655 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-config\") pod \"dnsmasq-dns-bc45f6dcf-ns4nt\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.403150 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-ovsdbserver-nb\") pod \"dnsmasq-dns-bc45f6dcf-ns4nt\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.403243 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-dns-svc\") pod \"dnsmasq-dns-bc45f6dcf-ns4nt\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.403276 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwmkq\" (UniqueName: \"kubernetes.io/projected/9786c139-7d1f-420a-a601-fa3f046f7bc0-kube-api-access-cwmkq\") pod \"dnsmasq-dns-bc45f6dcf-ns4nt\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.403307 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-ovsdbserver-sb\") pod \"dnsmasq-dns-bc45f6dcf-ns4nt\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.403364 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6kvln\" (UniqueName: \"kubernetes.io/projected/5581f1cf-4f5f-4042-a04b-22a966f20b23-kube-api-access-6kvln\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.403387 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5581f1cf-4f5f-4042-a04b-22a966f20b23-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.403401 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5581f1cf-4f5f-4042-a04b-22a966f20b23-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.495844 5014 generic.go:334] "Generic (PLEG): container finished" podID="5581f1cf-4f5f-4042-a04b-22a966f20b23" containerID="b20be13d92f6a6c922bf691e28efe87c3aba061b6e9f183ecb1fe75daf0aa32a" exitCode=0 Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.495954 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-644597f84c-gc96s" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.502810 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3b18812d-9eec-4254-8633-b40f55244e47","Type":"ContainerStarted","Data":"1ca1df3e861d5336e4515ca03c187e532ee4429553cf2f9930ba7e1d9925c254"} Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.502847 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd","Type":"ContainerStarted","Data":"40dcdb5d7c50cb4d2ec51f4ef5316c1e1fba2901ad8cf7db90116b6c873b152a"} Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.502858 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-644597f84c-gc96s" event={"ID":"5581f1cf-4f5f-4042-a04b-22a966f20b23","Type":"ContainerDied","Data":"b20be13d92f6a6c922bf691e28efe87c3aba061b6e9f183ecb1fe75daf0aa32a"} Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.502872 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-644597f84c-gc96s" event={"ID":"5581f1cf-4f5f-4042-a04b-22a966f20b23","Type":"ContainerDied","Data":"42337f47576fab0d858cf0c2d4c85b206d511fd22ab1186abf34ecbc74685f4e"} Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.502889 5014 scope.go:117] "RemoveContainer" containerID="b20be13d92f6a6c922bf691e28efe87c3aba061b6e9f183ecb1fe75daf0aa32a" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.502954 5014 generic.go:334] "Generic (PLEG): container finished" podID="9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c" containerID="456dd0ade0f127576078aaf7d121748e52e5537bf83be4051acde44626b46020" exitCode=0 Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.503341 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77597f887-ctt8h" event={"ID":"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c","Type":"ContainerDied","Data":"456dd0ade0f127576078aaf7d121748e52e5537bf83be4051acde44626b46020"} Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.506437 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-ovsdbserver-nb\") pod \"dnsmasq-dns-bc45f6dcf-ns4nt\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.506556 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-dns-svc\") pod \"dnsmasq-dns-bc45f6dcf-ns4nt\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.506595 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwmkq\" (UniqueName: \"kubernetes.io/projected/9786c139-7d1f-420a-a601-fa3f046f7bc0-kube-api-access-cwmkq\") pod \"dnsmasq-dns-bc45f6dcf-ns4nt\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.506642 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-ovsdbserver-sb\") pod \"dnsmasq-dns-bc45f6dcf-ns4nt\" (UID: 
\"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.506687 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-config\") pod \"dnsmasq-dns-bc45f6dcf-ns4nt\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.508207 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-ovsdbserver-sb\") pod \"dnsmasq-dns-bc45f6dcf-ns4nt\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.508327 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-ovsdbserver-nb\") pod \"dnsmasq-dns-bc45f6dcf-ns4nt\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.510854 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-config\") pod \"dnsmasq-dns-bc45f6dcf-ns4nt\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.511870 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-dns-svc\") pod \"dnsmasq-dns-bc45f6dcf-ns4nt\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.525590 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwmkq\" (UniqueName: \"kubernetes.io/projected/9786c139-7d1f-420a-a601-fa3f046f7bc0-kube-api-access-cwmkq\") pod \"dnsmasq-dns-bc45f6dcf-ns4nt\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.532009 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=19.913340757 podStartE2EDuration="27.531989969s" podCreationTimestamp="2025-10-06 21:47:36 +0000 UTC" firstStartedPulling="2025-10-06 21:47:48.957511313 +0000 UTC m=+1014.250548047" lastFinishedPulling="2025-10-06 21:47:56.576160525 +0000 UTC m=+1021.869197259" observedRunningTime="2025-10-06 21:48:03.531914747 +0000 UTC m=+1028.824951491" watchObservedRunningTime="2025-10-06 21:48:03.531989969 +0000 UTC m=+1028.825026703" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.574091 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=18.158711815 podStartE2EDuration="25.574069925s" podCreationTimestamp="2025-10-06 21:47:38 +0000 UTC" firstStartedPulling="2025-10-06 21:47:49.189020911 +0000 UTC m=+1014.482057645" lastFinishedPulling="2025-10-06 21:47:56.604379001 +0000 UTC m=+1021.897415755" observedRunningTime="2025-10-06 21:48:03.570646966 +0000 UTC m=+1028.863683710" watchObservedRunningTime="2025-10-06 21:48:03.574069925 
+0000 UTC m=+1028.867106659"
Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.581252 5014 scope.go:117] "RemoveContainer" containerID="7ce9edc49743e08bed8809b10f9979dcf1260da3bcb647d3a6209f71507e3d1e"
Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.596273 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-644597f84c-gc96s"]
Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.620296 5014 scope.go:117] "RemoveContainer" containerID="b20be13d92f6a6c922bf691e28efe87c3aba061b6e9f183ecb1fe75daf0aa32a"
Oct 06 21:48:03 crc kubenswrapper[5014]: E1006 21:48:03.624876 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b20be13d92f6a6c922bf691e28efe87c3aba061b6e9f183ecb1fe75daf0aa32a\": container with ID starting with b20be13d92f6a6c922bf691e28efe87c3aba061b6e9f183ecb1fe75daf0aa32a not found: ID does not exist" containerID="b20be13d92f6a6c922bf691e28efe87c3aba061b6e9f183ecb1fe75daf0aa32a"
Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.624943 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b20be13d92f6a6c922bf691e28efe87c3aba061b6e9f183ecb1fe75daf0aa32a"} err="failed to get container status \"b20be13d92f6a6c922bf691e28efe87c3aba061b6e9f183ecb1fe75daf0aa32a\": rpc error: code = NotFound desc = could not find container \"b20be13d92f6a6c922bf691e28efe87c3aba061b6e9f183ecb1fe75daf0aa32a\": container with ID starting with b20be13d92f6a6c922bf691e28efe87c3aba061b6e9f183ecb1fe75daf0aa32a not found: ID does not exist"
Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.624981 5014 scope.go:117] "RemoveContainer" containerID="7ce9edc49743e08bed8809b10f9979dcf1260da3bcb647d3a6209f71507e3d1e"
Oct 06 21:48:03 crc kubenswrapper[5014]: E1006 21:48:03.625893 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ce9edc49743e08bed8809b10f9979dcf1260da3bcb647d3a6209f71507e3d1e\": container with ID starting with 7ce9edc49743e08bed8809b10f9979dcf1260da3bcb647d3a6209f71507e3d1e not found: ID does not exist" containerID="7ce9edc49743e08bed8809b10f9979dcf1260da3bcb647d3a6209f71507e3d1e"
Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.625930 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ce9edc49743e08bed8809b10f9979dcf1260da3bcb647d3a6209f71507e3d1e"} err="failed to get container status \"7ce9edc49743e08bed8809b10f9979dcf1260da3bcb647d3a6209f71507e3d1e\": rpc error: code = NotFound desc = could not find container \"7ce9edc49743e08bed8809b10f9979dcf1260da3bcb647d3a6209f71507e3d1e\": container with ID starting with 7ce9edc49743e08bed8809b10f9979dcf1260da3bcb647d3a6209f71507e3d1e not found: ID does not exist"
Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.630687 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-644597f84c-gc96s"]
Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.661528 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt"
Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.723256 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-9zq5k"]
Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.767983 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77597f887-ctt8h"
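
The ContainerStatus/DeleteContainer NotFound pair a few entries above (for b20be13d9... and again for 7ce9edc4...) looks like a benign race: the dnsmasq-dns-644597f84c-gc96s containers were already gone when the deletor re-queried their status, so the kubelet records the miss and continues to the REMOVE. A gRPC client that wants the same idempotent behaviour would treat NotFound as success — a sketch, not kubelet code:

    package sketch

    import (
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // deleteIgnoringNotFound removes a container via the supplied remove
    // function and swallows NotFound, mirroring the pattern above where a
    // missing container means the desired state already holds.
    func deleteIgnoringNotFound(remove func(containerID string) error, id string) error {
        if err := remove(id); err != nil && status.Code(err) != codes.NotFound {
            return err // a real failure
        }
        return nil // removed now, or already gone
    }
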
Need to start a new one" pod="openstack/dnsmasq-dns-77597f887-ctt8h" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.813331 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-config\") pod \"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c\" (UID: \"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c\") " Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.813460 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4t8h8\" (UniqueName: \"kubernetes.io/projected/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-kube-api-access-4t8h8\") pod \"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c\" (UID: \"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c\") " Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.813566 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-dns-svc\") pod \"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c\" (UID: \"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c\") " Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.817387 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-kube-api-access-4t8h8" (OuterVolumeSpecName: "kube-api-access-4t8h8") pod "9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c" (UID: "9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c"). InnerVolumeSpecName "kube-api-access-4t8h8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.852638 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54c9499b4f-pgvf8"] Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.856144 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c" (UID: "9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:03 crc kubenswrapper[5014]: W1006 21:48:03.857821 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee1f413c_b61e_4445_9412_bf1c45269ff1.slice/crio-d52cbac204a110cbdcb715de21ca7897c9037cf04a0e003c2148cbc42f420913 WatchSource:0}: Error finding container d52cbac204a110cbdcb715de21ca7897c9037cf04a0e003c2148cbc42f420913: Status 404 returned error can't find the container with id d52cbac204a110cbdcb715de21ca7897c9037cf04a0e003c2148cbc42f420913 Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.870766 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-config" (OuterVolumeSpecName: "config") pod "9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c" (UID: "9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.915742 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.915767 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4t8h8\" (UniqueName: \"kubernetes.io/projected/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-kube-api-access-4t8h8\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:03 crc kubenswrapper[5014]: I1006 21:48:03.916015 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.132165 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bc45f6dcf-ns4nt"] Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.151168 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.197830 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.516227 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" event={"ID":"9786c139-7d1f-420a-a601-fa3f046f7bc0","Type":"ContainerStarted","Data":"cba547220805c2d9b7c6e07eea919b31501fe698c0a3ca4b969c8033b0e1cb7f"} Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.519441 5014 generic.go:334] "Generic (PLEG): container finished" podID="ee1f413c-b61e-4445-9412-bf1c45269ff1" containerID="cd09446e82c8ef7141bfb67551f10fab53c22ad1bec49f9b068e03949fcf7436" exitCode=0 Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.519558 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" event={"ID":"ee1f413c-b61e-4445-9412-bf1c45269ff1","Type":"ContainerDied","Data":"cd09446e82c8ef7141bfb67551f10fab53c22ad1bec49f9b068e03949fcf7436"} Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.519596 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" event={"ID":"ee1f413c-b61e-4445-9412-bf1c45269ff1","Type":"ContainerStarted","Data":"d52cbac204a110cbdcb715de21ca7897c9037cf04a0e003c2148cbc42f420913"} Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.526051 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-9zq5k" event={"ID":"0d674559-08b5-41c9-8783-a5e42504fb3e","Type":"ContainerStarted","Data":"4548df78c36932efb3465251204787ed31111710ed70b8eb0c470d23a627a6c7"} Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.526100 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-9zq5k" event={"ID":"0d674559-08b5-41c9-8783-a5e42504fb3e","Type":"ContainerStarted","Data":"460eba36cd0f4513831dd10fbdaac25af83e5140d9c623eb9e0285331e98f5be"} Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.529419 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77597f887-ctt8h" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.532297 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77597f887-ctt8h" event={"ID":"9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c","Type":"ContainerDied","Data":"e7d6054aae787b24008aee6ea0ded3e8c54951463a1336d42d43058df185f36c"} Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.532369 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.532401 5014 scope.go:117] "RemoveContainer" containerID="456dd0ade0f127576078aaf7d121748e52e5537bf83be4051acde44626b46020" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.578431 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-9zq5k" podStartSLOduration=2.578407774 podStartE2EDuration="2.578407774s" podCreationTimestamp="2025-10-06 21:48:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:48:04.575878714 +0000 UTC m=+1029.868915468" watchObservedRunningTime="2025-10-06 21:48:04.578407774 +0000 UTC m=+1029.871444508" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.580647 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.661676 5014 scope.go:117] "RemoveContainer" containerID="ec3f13e3802c96f3ce80b92ccfb25ad8569b49912dcf79ff518f1cede75a6345" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.710756 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77597f887-ctt8h"] Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.731056 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77597f887-ctt8h"] Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.771685 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Oct 06 21:48:04 crc kubenswrapper[5014]: E1006 21:48:04.772135 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c" containerName="init" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.772158 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c" containerName="init" Oct 06 21:48:04 crc kubenswrapper[5014]: E1006 21:48:04.772186 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c" containerName="dnsmasq-dns" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.772193 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c" containerName="dnsmasq-dns" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.772355 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c" containerName="dnsmasq-dns" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.773327 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.776997 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.777305 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-4v8vv" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.777569 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.777791 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.787102 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.930591 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.930655 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.930720 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzwp7\" (UniqueName: \"kubernetes.io/projected/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-kube-api-access-tzwp7\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.930752 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-scripts\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.930786 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.930806 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-config\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:04 crc kubenswrapper[5014]: I1006 21:48:04.930837 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: 
I1006 21:48:05.033697 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.034660 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.034362 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.035272 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.035516 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzwp7\" (UniqueName: \"kubernetes.io/projected/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-kube-api-access-tzwp7\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.035754 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-scripts\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.035945 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.036108 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-config\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.037100 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-scripts\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.037926 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-config\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.039059 5014 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.039566 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.040663 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.051456 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzwp7\" (UniqueName: \"kubernetes.io/projected/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-kube-api-access-tzwp7\") pod \"ovn-northd-0\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.091321 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.144491 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.437925 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.495600 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5581f1cf-4f5f-4042-a04b-22a966f20b23" path="/var/lib/kubelet/pods/5581f1cf-4f5f-4042-a04b-22a966f20b23/volumes" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.496950 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c" path="/var/lib/kubelet/pods/9bcd5e47-c851-4fa7-b9ed-a0ed387dee8c/volumes" Oct 06 21:48:05 crc kubenswrapper[5014]: I1006 21:48:05.541582 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7","Type":"ContainerStarted","Data":"838b092f31e9d67188d165fd454fdb34ab0bb400cb117d86d9871925bed6ba7d"} Oct 06 21:48:06 crc kubenswrapper[5014]: I1006 21:48:06.554139 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" event={"ID":"9786c139-7d1f-420a-a601-fa3f046f7bc0","Type":"ContainerStarted","Data":"8dc6af6ae8b4f91e14ff13ef957fc50a85a639820e6da53ca3fb2c6470bcd6bc"} Oct 06 21:48:07 crc kubenswrapper[5014]: I1006 21:48:07.564338 5014 generic.go:334] "Generic (PLEG): container finished" podID="9786c139-7d1f-420a-a601-fa3f046f7bc0" containerID="8dc6af6ae8b4f91e14ff13ef957fc50a85a639820e6da53ca3fb2c6470bcd6bc" exitCode=0 Oct 06 21:48:07 crc kubenswrapper[5014]: I1006 21:48:07.564407 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" event={"ID":"9786c139-7d1f-420a-a601-fa3f046f7bc0","Type":"ContainerDied","Data":"8dc6af6ae8b4f91e14ff13ef957fc50a85a639820e6da53ca3fb2c6470bcd6bc"} Oct 06 21:48:08 crc 
kubenswrapper[5014]: I1006 21:48:08.307257 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Oct 06 21:48:08 crc kubenswrapper[5014]: I1006 21:48:08.307648 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Oct 06 21:48:09 crc kubenswrapper[5014]: I1006 21:48:09.585816 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" event={"ID":"ee1f413c-b61e-4445-9412-bf1c45269ff1","Type":"ContainerStarted","Data":"bd20acfaeec96f7c117a2081ec396835882b819287f64e79d48f59683b4501a9"} Oct 06 21:48:09 crc kubenswrapper[5014]: I1006 21:48:09.698364 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Oct 06 21:48:09 crc kubenswrapper[5014]: I1006 21:48:09.698748 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Oct 06 21:48:10 crc kubenswrapper[5014]: I1006 21:48:10.596750 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" event={"ID":"9786c139-7d1f-420a-a601-fa3f046f7bc0","Type":"ContainerStarted","Data":"49be2575edd42b3800377ea1ab0a1cfd121f2079f4ec100a62bcf8c067d0c918"} Oct 06 21:48:10 crc kubenswrapper[5014]: I1006 21:48:10.597428 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:10 crc kubenswrapper[5014]: I1006 21:48:10.622222 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" podStartSLOduration=8.622202249 podStartE2EDuration="8.622202249s" podCreationTimestamp="2025-10-06 21:48:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:48:10.617932094 +0000 UTC m=+1035.910968838" watchObservedRunningTime="2025-10-06 21:48:10.622202249 +0000 UTC m=+1035.915238983" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.608226 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7","Type":"ContainerStarted","Data":"d9b24ea6635bde477fa710678733d33fd5f1e17d35ee8b466a9fe4445920964c"} Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.608550 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7","Type":"ContainerStarted","Data":"a5aaa8e6da3bb1475013fcf9505c94fb4f157784a202b67b1fafdd6706d374db"} Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.618871 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.640294 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" podStartSLOduration=8.640275305 podStartE2EDuration="8.640275305s" podCreationTimestamp="2025-10-06 21:48:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:48:10.658111189 +0000 UTC m=+1035.951147923" watchObservedRunningTime="2025-10-06 21:48:11.640275305 +0000 UTC m=+1036.933312039" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.645947 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/ovn-northd-0" podStartSLOduration=2.446615756 podStartE2EDuration="7.645939294s" podCreationTimestamp="2025-10-06 21:48:04 +0000 UTC" firstStartedPulling="2025-10-06 21:48:05.463462269 +0000 UTC m=+1030.756498993" lastFinishedPulling="2025-10-06 21:48:10.662785797 +0000 UTC m=+1035.955822531" observedRunningTime="2025-10-06 21:48:11.638664184 +0000 UTC m=+1036.931700918" watchObservedRunningTime="2025-10-06 21:48:11.645939294 +0000 UTC m=+1036.938976028" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.685663 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54c9499b4f-pgvf8"] Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.723715 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57f58c7cff-sh7hf"] Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.724938 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.743604 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57f58c7cff-sh7hf"] Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.870998 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-ovsdbserver-nb\") pod \"dnsmasq-dns-57f58c7cff-sh7hf\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.871051 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-config\") pod \"dnsmasq-dns-57f58c7cff-sh7hf\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.871080 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-dns-svc\") pod \"dnsmasq-dns-57f58c7cff-sh7hf\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.871191 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mzqv\" (UniqueName: \"kubernetes.io/projected/3956e460-e179-438d-be81-7af1dc5fcfe8-kube-api-access-7mzqv\") pod \"dnsmasq-dns-57f58c7cff-sh7hf\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.871307 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-ovsdbserver-sb\") pod \"dnsmasq-dns-57f58c7cff-sh7hf\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.973393 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-ovsdbserver-nb\") pod \"dnsmasq-dns-57f58c7cff-sh7hf\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " 
pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.973432 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-config\") pod \"dnsmasq-dns-57f58c7cff-sh7hf\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.973453 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-dns-svc\") pod \"dnsmasq-dns-57f58c7cff-sh7hf\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.973490 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mzqv\" (UniqueName: \"kubernetes.io/projected/3956e460-e179-438d-be81-7af1dc5fcfe8-kube-api-access-7mzqv\") pod \"dnsmasq-dns-57f58c7cff-sh7hf\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.973516 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-ovsdbserver-sb\") pod \"dnsmasq-dns-57f58c7cff-sh7hf\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.974345 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-ovsdbserver-sb\") pod \"dnsmasq-dns-57f58c7cff-sh7hf\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.974528 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-dns-svc\") pod \"dnsmasq-dns-57f58c7cff-sh7hf\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.974604 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-config\") pod \"dnsmasq-dns-57f58c7cff-sh7hf\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.975285 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-ovsdbserver-nb\") pod \"dnsmasq-dns-57f58c7cff-sh7hf\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:11 crc kubenswrapper[5014]: I1006 21:48:11.995876 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mzqv\" (UniqueName: \"kubernetes.io/projected/3956e460-e179-438d-be81-7af1dc5fcfe8-kube-api-access-7mzqv\") pod \"dnsmasq-dns-57f58c7cff-sh7hf\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.042994 5014 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.409019 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.463965 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.549349 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57f58c7cff-sh7hf"] Oct 06 21:48:12 crc kubenswrapper[5014]: W1006 21:48:12.558787 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3956e460_e179_438d_be81_7af1dc5fcfe8.slice/crio-7d59fcb901e0e7587a415b46b9dcac3a54129f48dfe9bcdbdad735066b682d31 WatchSource:0}: Error finding container 7d59fcb901e0e7587a415b46b9dcac3a54129f48dfe9bcdbdad735066b682d31: Status 404 returned error can't find the container with id 7d59fcb901e0e7587a415b46b9dcac3a54129f48dfe9bcdbdad735066b682d31 Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.619102 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" event={"ID":"3956e460-e179-438d-be81-7af1dc5fcfe8","Type":"ContainerStarted","Data":"7d59fcb901e0e7587a415b46b9dcac3a54129f48dfe9bcdbdad735066b682d31"} Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.619538 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.619714 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" podUID="ee1f413c-b61e-4445-9412-bf1c45269ff1" containerName="dnsmasq-dns" containerID="cri-o://bd20acfaeec96f7c117a2081ec396835882b819287f64e79d48f59683b4501a9" gracePeriod=10 Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.854363 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.862267 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.864839 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.865366 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.869054 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-zlxpt" Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.869135 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.874905 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.997428 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/05220712-c8ae-4ac8-9c49-d74770367b33-lock\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.997513 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/05220712-c8ae-4ac8-9c49-d74770367b33-cache\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.997861 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzn7k\" (UniqueName: \"kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-kube-api-access-lzn7k\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.998146 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:12 crc kubenswrapper[5014]: I1006 21:48:12.998238 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.099343 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/05220712-c8ae-4ac8-9c49-d74770367b33-lock\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.099796 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/05220712-c8ae-4ac8-9c49-d74770367b33-cache\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.099858 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-lzn7k\" (UniqueName: \"kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-kube-api-access-lzn7k\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.099923 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.099956 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.099972 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/05220712-c8ae-4ac8-9c49-d74770367b33-lock\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:13 crc kubenswrapper[5014]: E1006 21:48:13.100144 5014 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 06 21:48:13 crc kubenswrapper[5014]: E1006 21:48:13.100174 5014 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 06 21:48:13 crc kubenswrapper[5014]: E1006 21:48:13.100228 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift podName:05220712-c8ae-4ac8-9c49-d74770367b33 nodeName:}" failed. No retries permitted until 2025-10-06 21:48:13.600210037 +0000 UTC m=+1038.893246771 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift") pod "swift-storage-0" (UID: "05220712-c8ae-4ac8-9c49-d74770367b33") : configmap "swift-ring-files" not found Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.100248 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/05220712-c8ae-4ac8-9c49-d74770367b33-cache\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.100420 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/swift-storage-0" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.107164 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.125116 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzn7k\" (UniqueName: \"kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-kube-api-access-lzn7k\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.173246 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.201552 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-config\") pod \"ee1f413c-b61e-4445-9412-bf1c45269ff1\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.201641 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-ovsdbserver-sb\") pod \"ee1f413c-b61e-4445-9412-bf1c45269ff1\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.201725 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-dns-svc\") pod \"ee1f413c-b61e-4445-9412-bf1c45269ff1\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.201820 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9qkr9\" (UniqueName: \"kubernetes.io/projected/ee1f413c-b61e-4445-9412-bf1c45269ff1-kube-api-access-9qkr9\") pod \"ee1f413c-b61e-4445-9412-bf1c45269ff1\" (UID: \"ee1f413c-b61e-4445-9412-bf1c45269ff1\") " Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.207742 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee1f413c-b61e-4445-9412-bf1c45269ff1-kube-api-access-9qkr9" (OuterVolumeSpecName: "kube-api-access-9qkr9") pod "ee1f413c-b61e-4445-9412-bf1c45269ff1" (UID: "ee1f413c-b61e-4445-9412-bf1c45269ff1"). InnerVolumeSpecName "kube-api-access-9qkr9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.250139 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-config" (OuterVolumeSpecName: "config") pod "ee1f413c-b61e-4445-9412-bf1c45269ff1" (UID: "ee1f413c-b61e-4445-9412-bf1c45269ff1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.250217 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ee1f413c-b61e-4445-9412-bf1c45269ff1" (UID: "ee1f413c-b61e-4445-9412-bf1c45269ff1"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.257089 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ee1f413c-b61e-4445-9412-bf1c45269ff1" (UID: "ee1f413c-b61e-4445-9412-bf1c45269ff1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.304054 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.304083 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9qkr9\" (UniqueName: \"kubernetes.io/projected/ee1f413c-b61e-4445-9412-bf1c45269ff1-kube-api-access-9qkr9\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.304093 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.304102 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ee1f413c-b61e-4445-9412-bf1c45269ff1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.610128 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:13 crc kubenswrapper[5014]: E1006 21:48:13.610350 5014 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 06 21:48:13 crc kubenswrapper[5014]: E1006 21:48:13.610699 5014 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 06 21:48:13 crc kubenswrapper[5014]: E1006 21:48:13.610777 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift podName:05220712-c8ae-4ac8-9c49-d74770367b33 nodeName:}" failed. No retries permitted until 2025-10-06 21:48:14.610750873 +0000 UTC m=+1039.903787607 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift") pod "swift-storage-0" (UID: "05220712-c8ae-4ac8-9c49-d74770367b33") : configmap "swift-ring-files" not found Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.634412 5014 generic.go:334] "Generic (PLEG): container finished" podID="3956e460-e179-438d-be81-7af1dc5fcfe8" containerID="c214ac2d814bae9c02ab2d0dae1a8a08701dce19c9581273dc223b7ee0546a88" exitCode=0 Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.634551 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" event={"ID":"3956e460-e179-438d-be81-7af1dc5fcfe8","Type":"ContainerDied","Data":"c214ac2d814bae9c02ab2d0dae1a8a08701dce19c9581273dc223b7ee0546a88"} Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.643888 5014 generic.go:334] "Generic (PLEG): container finished" podID="ee1f413c-b61e-4445-9412-bf1c45269ff1" containerID="bd20acfaeec96f7c117a2081ec396835882b819287f64e79d48f59683b4501a9" exitCode=0 Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.645214 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.645776 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" event={"ID":"ee1f413c-b61e-4445-9412-bf1c45269ff1","Type":"ContainerDied","Data":"bd20acfaeec96f7c117a2081ec396835882b819287f64e79d48f59683b4501a9"} Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.645842 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c9499b4f-pgvf8" event={"ID":"ee1f413c-b61e-4445-9412-bf1c45269ff1","Type":"ContainerDied","Data":"d52cbac204a110cbdcb715de21ca7897c9037cf04a0e003c2148cbc42f420913"} Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.645879 5014 scope.go:117] "RemoveContainer" containerID="bd20acfaeec96f7c117a2081ec396835882b819287f64e79d48f59683b4501a9" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.663430 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.697555 5014 scope.go:117] "RemoveContainer" containerID="cd09446e82c8ef7141bfb67551f10fab53c22ad1bec49f9b068e03949fcf7436" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.762134 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54c9499b4f-pgvf8"] Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.773149 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54c9499b4f-pgvf8"] Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.779999 5014 scope.go:117] "RemoveContainer" containerID="bd20acfaeec96f7c117a2081ec396835882b819287f64e79d48f59683b4501a9" Oct 06 21:48:13 crc kubenswrapper[5014]: E1006 21:48:13.780874 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd20acfaeec96f7c117a2081ec396835882b819287f64e79d48f59683b4501a9\": container with ID starting with bd20acfaeec96f7c117a2081ec396835882b819287f64e79d48f59683b4501a9 not found: ID does not exist" containerID="bd20acfaeec96f7c117a2081ec396835882b819287f64e79d48f59683b4501a9" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.780933 5014 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"bd20acfaeec96f7c117a2081ec396835882b819287f64e79d48f59683b4501a9"} err="failed to get container status \"bd20acfaeec96f7c117a2081ec396835882b819287f64e79d48f59683b4501a9\": rpc error: code = NotFound desc = could not find container \"bd20acfaeec96f7c117a2081ec396835882b819287f64e79d48f59683b4501a9\": container with ID starting with bd20acfaeec96f7c117a2081ec396835882b819287f64e79d48f59683b4501a9 not found: ID does not exist" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.780964 5014 scope.go:117] "RemoveContainer" containerID="cd09446e82c8ef7141bfb67551f10fab53c22ad1bec49f9b068e03949fcf7436" Oct 06 21:48:13 crc kubenswrapper[5014]: E1006 21:48:13.783442 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd09446e82c8ef7141bfb67551f10fab53c22ad1bec49f9b068e03949fcf7436\": container with ID starting with cd09446e82c8ef7141bfb67551f10fab53c22ad1bec49f9b068e03949fcf7436 not found: ID does not exist" containerID="cd09446e82c8ef7141bfb67551f10fab53c22ad1bec49f9b068e03949fcf7436" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.783497 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd09446e82c8ef7141bfb67551f10fab53c22ad1bec49f9b068e03949fcf7436"} err="failed to get container status \"cd09446e82c8ef7141bfb67551f10fab53c22ad1bec49f9b068e03949fcf7436\": rpc error: code = NotFound desc = could not find container \"cd09446e82c8ef7141bfb67551f10fab53c22ad1bec49f9b068e03949fcf7436\": container with ID starting with cd09446e82c8ef7141bfb67551f10fab53c22ad1bec49f9b068e03949fcf7436 not found: ID does not exist" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.815579 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Oct 06 21:48:13 crc kubenswrapper[5014]: I1006 21:48:13.876495 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Oct 06 21:48:14 crc kubenswrapper[5014]: I1006 21:48:14.643578 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:14 crc kubenswrapper[5014]: E1006 21:48:14.643768 5014 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 06 21:48:14 crc kubenswrapper[5014]: E1006 21:48:14.643786 5014 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 06 21:48:14 crc kubenswrapper[5014]: E1006 21:48:14.643837 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift podName:05220712-c8ae-4ac8-9c49-d74770367b33 nodeName:}" failed. No retries permitted until 2025-10-06 21:48:16.643822434 +0000 UTC m=+1041.936859168 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift") pod "swift-storage-0" (UID: "05220712-c8ae-4ac8-9c49-d74770367b33") : configmap "swift-ring-files" not found Oct 06 21:48:14 crc kubenswrapper[5014]: I1006 21:48:14.653921 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" event={"ID":"3956e460-e179-438d-be81-7af1dc5fcfe8","Type":"ContainerStarted","Data":"9984bfb89d550dfd25e4f5866d31fdaf0ecb771bdb962caaf7cb9f1f81dcf9ea"} Oct 06 21:48:14 crc kubenswrapper[5014]: I1006 21:48:14.654033 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:14 crc kubenswrapper[5014]: I1006 21:48:14.672907 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" podStartSLOduration=3.672861106 podStartE2EDuration="3.672861106s" podCreationTimestamp="2025-10-06 21:48:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:48:14.668388714 +0000 UTC m=+1039.961425438" watchObservedRunningTime="2025-10-06 21:48:14.672861106 +0000 UTC m=+1039.965897840" Oct 06 21:48:15 crc kubenswrapper[5014]: I1006 21:48:15.332921 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-2txhf"] Oct 06 21:48:15 crc kubenswrapper[5014]: E1006 21:48:15.334932 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee1f413c-b61e-4445-9412-bf1c45269ff1" containerName="dnsmasq-dns" Oct 06 21:48:15 crc kubenswrapper[5014]: I1006 21:48:15.335147 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee1f413c-b61e-4445-9412-bf1c45269ff1" containerName="dnsmasq-dns" Oct 06 21:48:15 crc kubenswrapper[5014]: E1006 21:48:15.335402 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee1f413c-b61e-4445-9412-bf1c45269ff1" containerName="init" Oct 06 21:48:15 crc kubenswrapper[5014]: I1006 21:48:15.335605 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee1f413c-b61e-4445-9412-bf1c45269ff1" containerName="init" Oct 06 21:48:15 crc kubenswrapper[5014]: I1006 21:48:15.336236 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee1f413c-b61e-4445-9412-bf1c45269ff1" containerName="dnsmasq-dns" Oct 06 21:48:15 crc kubenswrapper[5014]: I1006 21:48:15.337478 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-2txhf" Oct 06 21:48:15 crc kubenswrapper[5014]: I1006 21:48:15.347470 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-2txhf"] Oct 06 21:48:15 crc kubenswrapper[5014]: I1006 21:48:15.355240 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsc8h\" (UniqueName: \"kubernetes.io/projected/cfdf797b-91a7-456f-a243-f08ba12aafbf-kube-api-access-xsc8h\") pod \"glance-db-create-2txhf\" (UID: \"cfdf797b-91a7-456f-a243-f08ba12aafbf\") " pod="openstack/glance-db-create-2txhf" Oct 06 21:48:15 crc kubenswrapper[5014]: I1006 21:48:15.456518 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsc8h\" (UniqueName: \"kubernetes.io/projected/cfdf797b-91a7-456f-a243-f08ba12aafbf-kube-api-access-xsc8h\") pod \"glance-db-create-2txhf\" (UID: \"cfdf797b-91a7-456f-a243-f08ba12aafbf\") " pod="openstack/glance-db-create-2txhf" Oct 06 21:48:15 crc kubenswrapper[5014]: I1006 21:48:15.484826 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsc8h\" (UniqueName: \"kubernetes.io/projected/cfdf797b-91a7-456f-a243-f08ba12aafbf-kube-api-access-xsc8h\") pod \"glance-db-create-2txhf\" (UID: \"cfdf797b-91a7-456f-a243-f08ba12aafbf\") " pod="openstack/glance-db-create-2txhf" Oct 06 21:48:15 crc kubenswrapper[5014]: I1006 21:48:15.516254 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee1f413c-b61e-4445-9412-bf1c45269ff1" path="/var/lib/kubelet/pods/ee1f413c-b61e-4445-9412-bf1c45269ff1/volumes" Oct 06 21:48:15 crc kubenswrapper[5014]: I1006 21:48:15.684882 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2txhf" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.213444 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-2txhf"] Oct 06 21:48:16 crc kubenswrapper[5014]: W1006 21:48:16.216152 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcfdf797b_91a7_456f_a243_f08ba12aafbf.slice/crio-873c09428d262cc867d5b052ee0fcc45e86d53be022e4ed99b822cd61cfc9ec2 WatchSource:0}: Error finding container 873c09428d262cc867d5b052ee0fcc45e86d53be022e4ed99b822cd61cfc9ec2: Status 404 returned error can't find the container with id 873c09428d262cc867d5b052ee0fcc45e86d53be022e4ed99b822cd61cfc9ec2 Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.676318 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:16 crc kubenswrapper[5014]: E1006 21:48:16.676542 5014 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 06 21:48:16 crc kubenswrapper[5014]: E1006 21:48:16.676799 5014 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 06 21:48:16 crc kubenswrapper[5014]: E1006 21:48:16.676852 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift podName:05220712-c8ae-4ac8-9c49-d74770367b33 nodeName:}" failed. 
No retries permitted until 2025-10-06 21:48:20.676835457 +0000 UTC m=+1045.969872191 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift") pod "swift-storage-0" (UID: "05220712-c8ae-4ac8-9c49-d74770367b33") : configmap "swift-ring-files" not found Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.677233 5014 generic.go:334] "Generic (PLEG): container finished" podID="cfdf797b-91a7-456f-a243-f08ba12aafbf" containerID="1dc17faa00611299d42adc11ce138f3ba8363c30f35a27bbcfbaadfdf3a44df1" exitCode=0 Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.677269 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2txhf" event={"ID":"cfdf797b-91a7-456f-a243-f08ba12aafbf","Type":"ContainerDied","Data":"1dc17faa00611299d42adc11ce138f3ba8363c30f35a27bbcfbaadfdf3a44df1"} Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.677296 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2txhf" event={"ID":"cfdf797b-91a7-456f-a243-f08ba12aafbf","Type":"ContainerStarted","Data":"873c09428d262cc867d5b052ee0fcc45e86d53be022e4ed99b822cd61cfc9ec2"} Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.808750 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-lcqfw"] Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.810251 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.812252 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.812746 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.815368 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.819476 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-lcqfw"] Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.879900 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-dispersionconf\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.879962 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2b292ee8-d31c-4e73-80e9-ccc915aeb406-etc-swift\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.879995 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2b292ee8-d31c-4e73-80e9-ccc915aeb406-scripts\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.880011 5014 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-swiftconf\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.880032 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-combined-ca-bundle\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.880054 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2b292ee8-d31c-4e73-80e9-ccc915aeb406-ring-data-devices\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.880089 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xdr2\" (UniqueName: \"kubernetes.io/projected/2b292ee8-d31c-4e73-80e9-ccc915aeb406-kube-api-access-8xdr2\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.980751 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xdr2\" (UniqueName: \"kubernetes.io/projected/2b292ee8-d31c-4e73-80e9-ccc915aeb406-kube-api-access-8xdr2\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.980891 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-dispersionconf\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.980974 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2b292ee8-d31c-4e73-80e9-ccc915aeb406-etc-swift\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.981035 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2b292ee8-d31c-4e73-80e9-ccc915aeb406-scripts\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.981067 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-swiftconf\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.981110 5014 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-combined-ca-bundle\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.981150 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2b292ee8-d31c-4e73-80e9-ccc915aeb406-ring-data-devices\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.981996 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2b292ee8-d31c-4e73-80e9-ccc915aeb406-etc-swift\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.982246 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2b292ee8-d31c-4e73-80e9-ccc915aeb406-ring-data-devices\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.982579 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2b292ee8-d31c-4e73-80e9-ccc915aeb406-scripts\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.989631 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-combined-ca-bundle\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.991084 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-swiftconf\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:16 crc kubenswrapper[5014]: I1006 21:48:16.994299 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-dispersionconf\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:17 crc kubenswrapper[5014]: I1006 21:48:17.006132 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xdr2\" (UniqueName: \"kubernetes.io/projected/2b292ee8-d31c-4e73-80e9-ccc915aeb406-kube-api-access-8xdr2\") pod \"swift-ring-rebalance-lcqfw\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:17 crc kubenswrapper[5014]: I1006 21:48:17.135095 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:17 crc kubenswrapper[5014]: I1006 21:48:17.641914 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-lcqfw"] Oct 06 21:48:17 crc kubenswrapper[5014]: W1006 21:48:17.645050 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b292ee8_d31c_4e73_80e9_ccc915aeb406.slice/crio-0746398e2638240d943445f81990aa42e520e1a60e48c8bef9cf1ada0bdab7a8 WatchSource:0}: Error finding container 0746398e2638240d943445f81990aa42e520e1a60e48c8bef9cf1ada0bdab7a8: Status 404 returned error can't find the container with id 0746398e2638240d943445f81990aa42e520e1a60e48c8bef9cf1ada0bdab7a8 Oct 06 21:48:17 crc kubenswrapper[5014]: I1006 21:48:17.691910 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-lcqfw" event={"ID":"2b292ee8-d31c-4e73-80e9-ccc915aeb406","Type":"ContainerStarted","Data":"0746398e2638240d943445f81990aa42e520e1a60e48c8bef9cf1ada0bdab7a8"} Oct 06 21:48:18 crc kubenswrapper[5014]: I1006 21:48:18.032514 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2txhf" Oct 06 21:48:18 crc kubenswrapper[5014]: I1006 21:48:18.200918 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xsc8h\" (UniqueName: \"kubernetes.io/projected/cfdf797b-91a7-456f-a243-f08ba12aafbf-kube-api-access-xsc8h\") pod \"cfdf797b-91a7-456f-a243-f08ba12aafbf\" (UID: \"cfdf797b-91a7-456f-a243-f08ba12aafbf\") " Oct 06 21:48:18 crc kubenswrapper[5014]: I1006 21:48:18.223317 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfdf797b-91a7-456f-a243-f08ba12aafbf-kube-api-access-xsc8h" (OuterVolumeSpecName: "kube-api-access-xsc8h") pod "cfdf797b-91a7-456f-a243-f08ba12aafbf" (UID: "cfdf797b-91a7-456f-a243-f08ba12aafbf"). InnerVolumeSpecName "kube-api-access-xsc8h". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:18 crc kubenswrapper[5014]: I1006 21:48:18.302743 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xsc8h\" (UniqueName: \"kubernetes.io/projected/cfdf797b-91a7-456f-a243-f08ba12aafbf-kube-api-access-xsc8h\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:18 crc kubenswrapper[5014]: I1006 21:48:18.663656 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:18 crc kubenswrapper[5014]: I1006 21:48:18.709026 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2txhf" event={"ID":"cfdf797b-91a7-456f-a243-f08ba12aafbf","Type":"ContainerDied","Data":"873c09428d262cc867d5b052ee0fcc45e86d53be022e4ed99b822cd61cfc9ec2"} Oct 06 21:48:18 crc kubenswrapper[5014]: I1006 21:48:18.709067 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="873c09428d262cc867d5b052ee0fcc45e86d53be022e4ed99b822cd61cfc9ec2" Oct 06 21:48:18 crc kubenswrapper[5014]: I1006 21:48:18.709122 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-2txhf" Oct 06 21:48:19 crc kubenswrapper[5014]: I1006 21:48:19.671285 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-pskhp"] Oct 06 21:48:19 crc kubenswrapper[5014]: E1006 21:48:19.671671 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfdf797b-91a7-456f-a243-f08ba12aafbf" containerName="mariadb-database-create" Oct 06 21:48:19 crc kubenswrapper[5014]: I1006 21:48:19.671684 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfdf797b-91a7-456f-a243-f08ba12aafbf" containerName="mariadb-database-create" Oct 06 21:48:19 crc kubenswrapper[5014]: I1006 21:48:19.671853 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfdf797b-91a7-456f-a243-f08ba12aafbf" containerName="mariadb-database-create" Oct 06 21:48:19 crc kubenswrapper[5014]: I1006 21:48:19.672426 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-pskhp" Oct 06 21:48:19 crc kubenswrapper[5014]: I1006 21:48:19.679675 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-pskhp"] Oct 06 21:48:19 crc kubenswrapper[5014]: I1006 21:48:19.827395 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkmm7\" (UniqueName: \"kubernetes.io/projected/c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb-kube-api-access-gkmm7\") pod \"keystone-db-create-pskhp\" (UID: \"c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb\") " pod="openstack/keystone-db-create-pskhp" Oct 06 21:48:19 crc kubenswrapper[5014]: I1006 21:48:19.929809 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkmm7\" (UniqueName: \"kubernetes.io/projected/c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb-kube-api-access-gkmm7\") pod \"keystone-db-create-pskhp\" (UID: \"c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb\") " pod="openstack/keystone-db-create-pskhp" Oct 06 21:48:19 crc kubenswrapper[5014]: I1006 21:48:19.948086 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkmm7\" (UniqueName: \"kubernetes.io/projected/c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb-kube-api-access-gkmm7\") pod \"keystone-db-create-pskhp\" (UID: \"c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb\") " pod="openstack/keystone-db-create-pskhp" Oct 06 21:48:19 crc kubenswrapper[5014]: I1006 21:48:19.982515 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-gc874"] Oct 06 21:48:19 crc kubenswrapper[5014]: I1006 21:48:19.984732 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-gc874" Oct 06 21:48:19 crc kubenswrapper[5014]: I1006 21:48:19.994718 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-gc874"] Oct 06 21:48:20 crc kubenswrapper[5014]: I1006 21:48:20.000812 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-pskhp" Oct 06 21:48:20 crc kubenswrapper[5014]: I1006 21:48:20.133723 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvpbt\" (UniqueName: \"kubernetes.io/projected/34c1300b-0bf2-4bb3-af1b-7fb7f33182d2-kube-api-access-vvpbt\") pod \"placement-db-create-gc874\" (UID: \"34c1300b-0bf2-4bb3-af1b-7fb7f33182d2\") " pod="openstack/placement-db-create-gc874" Oct 06 21:48:20 crc kubenswrapper[5014]: I1006 21:48:20.220406 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Oct 06 21:48:20 crc kubenswrapper[5014]: I1006 21:48:20.243753 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvpbt\" (UniqueName: \"kubernetes.io/projected/34c1300b-0bf2-4bb3-af1b-7fb7f33182d2-kube-api-access-vvpbt\") pod \"placement-db-create-gc874\" (UID: \"34c1300b-0bf2-4bb3-af1b-7fb7f33182d2\") " pod="openstack/placement-db-create-gc874" Oct 06 21:48:20 crc kubenswrapper[5014]: I1006 21:48:20.268318 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvpbt\" (UniqueName: \"kubernetes.io/projected/34c1300b-0bf2-4bb3-af1b-7fb7f33182d2-kube-api-access-vvpbt\") pod \"placement-db-create-gc874\" (UID: \"34c1300b-0bf2-4bb3-af1b-7fb7f33182d2\") " pod="openstack/placement-db-create-gc874" Oct 06 21:48:20 crc kubenswrapper[5014]: I1006 21:48:20.309192 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-gc874" Oct 06 21:48:20 crc kubenswrapper[5014]: I1006 21:48:20.755582 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:20 crc kubenswrapper[5014]: E1006 21:48:20.755792 5014 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 06 21:48:20 crc kubenswrapper[5014]: E1006 21:48:20.755822 5014 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 06 21:48:20 crc kubenswrapper[5014]: E1006 21:48:20.755889 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift podName:05220712-c8ae-4ac8-9c49-d74770367b33 nodeName:}" failed. No retries permitted until 2025-10-06 21:48:28.755869485 +0000 UTC m=+1054.048906219 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift") pod "swift-storage-0" (UID: "05220712-c8ae-4ac8-9c49-d74770367b33") : configmap "swift-ring-files" not found Oct 06 21:48:21 crc kubenswrapper[5014]: I1006 21:48:21.679276 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-pskhp"] Oct 06 21:48:21 crc kubenswrapper[5014]: I1006 21:48:21.731858 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-gc874"] Oct 06 21:48:21 crc kubenswrapper[5014]: I1006 21:48:21.737755 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-lcqfw" event={"ID":"2b292ee8-d31c-4e73-80e9-ccc915aeb406","Type":"ContainerStarted","Data":"59c2238484a6521933fa9031cd9b5dcc70ad7ec740e5a0296966fba0fa91301e"} Oct 06 21:48:21 crc kubenswrapper[5014]: W1006 21:48:21.740971 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34c1300b_0bf2_4bb3_af1b_7fb7f33182d2.slice/crio-177a3e069280836d69b52f89802ef15f2c328426c83053eea8a77fae3164c72f WatchSource:0}: Error finding container 177a3e069280836d69b52f89802ef15f2c328426c83053eea8a77fae3164c72f: Status 404 returned error can't find the container with id 177a3e069280836d69b52f89802ef15f2c328426c83053eea8a77fae3164c72f Oct 06 21:48:21 crc kubenswrapper[5014]: I1006 21:48:21.742122 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-pskhp" event={"ID":"c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb","Type":"ContainerStarted","Data":"9b33982674f1d457ecb5a5d45079e3aca0542ff2f10c0adac9c78841b061a386"} Oct 06 21:48:21 crc kubenswrapper[5014]: I1006 21:48:21.764001 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-lcqfw" podStartSLOduration=2.213528445 podStartE2EDuration="5.763985865s" podCreationTimestamp="2025-10-06 21:48:16 +0000 UTC" firstStartedPulling="2025-10-06 21:48:17.647078504 +0000 UTC m=+1042.940115248" lastFinishedPulling="2025-10-06 21:48:21.197535914 +0000 UTC m=+1046.490572668" observedRunningTime="2025-10-06 21:48:21.760938108 +0000 UTC m=+1047.053974862" watchObservedRunningTime="2025-10-06 21:48:21.763985865 +0000 UTC m=+1047.057022599" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.046267 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.123171 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bc45f6dcf-ns4nt"] Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.123598 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" podUID="9786c139-7d1f-420a-a601-fa3f046f7bc0" containerName="dnsmasq-dns" containerID="cri-o://49be2575edd42b3800377ea1ab0a1cfd121f2079f4ec100a62bcf8c067d0c918" gracePeriod=10 Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.575962 5014 util.go:48] "No ready sandbox for pod can be found. 
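---- annotation (not part of the log) ----
Note the retry cadence across the two etc-swift failures: durationBeforeRetry grows from 4s (21:48:16) to 8s (21:48:20) — the kubelet's nestedpendingoperations applies exponential backoff to failed volume operations instead of retrying at a fixed rate. A short Go sketch of the doubling pattern; the initial delay and cap here are illustrative assumptions, not kubelet's exact constants:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// Doubling backoff as observed in the log (... 4s, then 8s ...).
    	delay := 1 * time.Second      // assumed initial delay
    	maxDelay := 2 * time.Minute   // assumed cap
    	for attempt := 1; attempt <= 8; attempt++ {
    		fmt.Printf("attempt %d failed; no retries permitted for %v\n", attempt, delay)
    		delay *= 2
    		if delay > maxDelay {
    			delay = maxDelay
    		}
    	}
    }

---- end annotation ----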
Need to start a new one" pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.588533 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-config\") pod \"9786c139-7d1f-420a-a601-fa3f046f7bc0\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.588612 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-ovsdbserver-nb\") pod \"9786c139-7d1f-420a-a601-fa3f046f7bc0\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.588678 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-dns-svc\") pod \"9786c139-7d1f-420a-a601-fa3f046f7bc0\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.588707 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-ovsdbserver-sb\") pod \"9786c139-7d1f-420a-a601-fa3f046f7bc0\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.647426 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9786c139-7d1f-420a-a601-fa3f046f7bc0" (UID: "9786c139-7d1f-420a-a601-fa3f046f7bc0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.657441 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9786c139-7d1f-420a-a601-fa3f046f7bc0" (UID: "9786c139-7d1f-420a-a601-fa3f046f7bc0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.658864 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-config" (OuterVolumeSpecName: "config") pod "9786c139-7d1f-420a-a601-fa3f046f7bc0" (UID: "9786c139-7d1f-420a-a601-fa3f046f7bc0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.672971 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9786c139-7d1f-420a-a601-fa3f046f7bc0" (UID: "9786c139-7d1f-420a-a601-fa3f046f7bc0"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.689895 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwmkq\" (UniqueName: \"kubernetes.io/projected/9786c139-7d1f-420a-a601-fa3f046f7bc0-kube-api-access-cwmkq\") pod \"9786c139-7d1f-420a-a601-fa3f046f7bc0\" (UID: \"9786c139-7d1f-420a-a601-fa3f046f7bc0\") " Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.690386 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.690409 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.690423 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.690435 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9786c139-7d1f-420a-a601-fa3f046f7bc0-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.694278 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9786c139-7d1f-420a-a601-fa3f046f7bc0-kube-api-access-cwmkq" (OuterVolumeSpecName: "kube-api-access-cwmkq") pod "9786c139-7d1f-420a-a601-fa3f046f7bc0" (UID: "9786c139-7d1f-420a-a601-fa3f046f7bc0"). InnerVolumeSpecName "kube-api-access-cwmkq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.769192 5014 generic.go:334] "Generic (PLEG): container finished" podID="9786c139-7d1f-420a-a601-fa3f046f7bc0" containerID="49be2575edd42b3800377ea1ab0a1cfd121f2079f4ec100a62bcf8c067d0c918" exitCode=0 Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.769257 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.769292 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" event={"ID":"9786c139-7d1f-420a-a601-fa3f046f7bc0","Type":"ContainerDied","Data":"49be2575edd42b3800377ea1ab0a1cfd121f2079f4ec100a62bcf8c067d0c918"} Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.769330 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bc45f6dcf-ns4nt" event={"ID":"9786c139-7d1f-420a-a601-fa3f046f7bc0","Type":"ContainerDied","Data":"cba547220805c2d9b7c6e07eea919b31501fe698c0a3ca4b969c8033b0e1cb7f"} Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.769346 5014 scope.go:117] "RemoveContainer" containerID="49be2575edd42b3800377ea1ab0a1cfd121f2079f4ec100a62bcf8c067d0c918" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.774428 5014 generic.go:334] "Generic (PLEG): container finished" podID="34c1300b-0bf2-4bb3-af1b-7fb7f33182d2" containerID="b1ed3d8475b8964f1ec03d5f52097548f4dc541c1ee4d2d12bb0acf7fc254d27" exitCode=0 Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.774495 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-gc874" event={"ID":"34c1300b-0bf2-4bb3-af1b-7fb7f33182d2","Type":"ContainerDied","Data":"b1ed3d8475b8964f1ec03d5f52097548f4dc541c1ee4d2d12bb0acf7fc254d27"} Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.774517 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-gc874" event={"ID":"34c1300b-0bf2-4bb3-af1b-7fb7f33182d2","Type":"ContainerStarted","Data":"177a3e069280836d69b52f89802ef15f2c328426c83053eea8a77fae3164c72f"} Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.783294 5014 generic.go:334] "Generic (PLEG): container finished" podID="c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb" containerID="a65f05bfe49a06b41ce8f1ed83aafd17d0fda384fbadf3ce3c9e2d53641f8290" exitCode=0 Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.783396 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-pskhp" event={"ID":"c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb","Type":"ContainerDied","Data":"a65f05bfe49a06b41ce8f1ed83aafd17d0fda384fbadf3ce3c9e2d53641f8290"} Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.792821 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwmkq\" (UniqueName: \"kubernetes.io/projected/9786c139-7d1f-420a-a601-fa3f046f7bc0-kube-api-access-cwmkq\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.822907 5014 scope.go:117] "RemoveContainer" containerID="8dc6af6ae8b4f91e14ff13ef957fc50a85a639820e6da53ca3fb2c6470bcd6bc" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.826768 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bc45f6dcf-ns4nt"] Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.832102 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bc45f6dcf-ns4nt"] Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.841841 5014 scope.go:117] "RemoveContainer" containerID="49be2575edd42b3800377ea1ab0a1cfd121f2079f4ec100a62bcf8c067d0c918" Oct 06 21:48:22 crc kubenswrapper[5014]: E1006 21:48:22.842315 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49be2575edd42b3800377ea1ab0a1cfd121f2079f4ec100a62bcf8c067d0c918\": container with ID 
starting with 49be2575edd42b3800377ea1ab0a1cfd121f2079f4ec100a62bcf8c067d0c918 not found: ID does not exist" containerID="49be2575edd42b3800377ea1ab0a1cfd121f2079f4ec100a62bcf8c067d0c918" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.842346 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49be2575edd42b3800377ea1ab0a1cfd121f2079f4ec100a62bcf8c067d0c918"} err="failed to get container status \"49be2575edd42b3800377ea1ab0a1cfd121f2079f4ec100a62bcf8c067d0c918\": rpc error: code = NotFound desc = could not find container \"49be2575edd42b3800377ea1ab0a1cfd121f2079f4ec100a62bcf8c067d0c918\": container with ID starting with 49be2575edd42b3800377ea1ab0a1cfd121f2079f4ec100a62bcf8c067d0c918 not found: ID does not exist" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.842368 5014 scope.go:117] "RemoveContainer" containerID="8dc6af6ae8b4f91e14ff13ef957fc50a85a639820e6da53ca3fb2c6470bcd6bc" Oct 06 21:48:22 crc kubenswrapper[5014]: E1006 21:48:22.842823 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8dc6af6ae8b4f91e14ff13ef957fc50a85a639820e6da53ca3fb2c6470bcd6bc\": container with ID starting with 8dc6af6ae8b4f91e14ff13ef957fc50a85a639820e6da53ca3fb2c6470bcd6bc not found: ID does not exist" containerID="8dc6af6ae8b4f91e14ff13ef957fc50a85a639820e6da53ca3fb2c6470bcd6bc" Oct 06 21:48:22 crc kubenswrapper[5014]: I1006 21:48:22.842847 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8dc6af6ae8b4f91e14ff13ef957fc50a85a639820e6da53ca3fb2c6470bcd6bc"} err="failed to get container status \"8dc6af6ae8b4f91e14ff13ef957fc50a85a639820e6da53ca3fb2c6470bcd6bc\": rpc error: code = NotFound desc = could not find container \"8dc6af6ae8b4f91e14ff13ef957fc50a85a639820e6da53ca3fb2c6470bcd6bc\": container with ID starting with 8dc6af6ae8b4f91e14ff13ef957fc50a85a639820e6da53ca3fb2c6470bcd6bc not found: ID does not exist" Oct 06 21:48:23 crc kubenswrapper[5014]: I1006 21:48:23.498417 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9786c139-7d1f-420a-a601-fa3f046f7bc0" path="/var/lib/kubelet/pods/9786c139-7d1f-420a-a601-fa3f046f7bc0/volumes" Oct 06 21:48:24 crc kubenswrapper[5014]: I1006 21:48:24.211113 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-pskhp" Oct 06 21:48:24 crc kubenswrapper[5014]: I1006 21:48:24.222323 5014 util.go:48] "No ready sandbox for pod can be found. 
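---- annotation (not part of the log) ----
The RemoveContainer / "ContainerStatus from runtime service failed" pairs above are benign: the dnsmasq-dns containers were already gone when the kubelet re-issued the delete, and CRI-O answered over CRI with gRPC code NotFound, which the kubelet logs and then moves past — cleanup is idempotent. A sketch of how a Go client distinguishes that case, using the real google.golang.org/grpc/status API (the constructed err is a stand-in for the logged one):

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // alreadyGone reports whether a CRI delete/status error just means the
    // container no longer exists, which a cleanup loop can treat as success.
    func alreadyGone(err error) bool {
    	st, ok := status.FromError(err)
    	return ok && st.Code() == codes.NotFound
    }

    func main() {
    	err := status.Error(codes.NotFound, `could not find container "49be..."`)
    	fmt.Println("treat as deleted:", alreadyGone(err)) // prints: treat as deleted: true
    }

---- end annotation ----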
Need to start a new one" pod="openstack/placement-db-create-gc874" Oct 06 21:48:24 crc kubenswrapper[5014]: I1006 21:48:24.316931 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkmm7\" (UniqueName: \"kubernetes.io/projected/c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb-kube-api-access-gkmm7\") pod \"c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb\" (UID: \"c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb\") " Oct 06 21:48:24 crc kubenswrapper[5014]: I1006 21:48:24.316985 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvpbt\" (UniqueName: \"kubernetes.io/projected/34c1300b-0bf2-4bb3-af1b-7fb7f33182d2-kube-api-access-vvpbt\") pod \"34c1300b-0bf2-4bb3-af1b-7fb7f33182d2\" (UID: \"34c1300b-0bf2-4bb3-af1b-7fb7f33182d2\") " Oct 06 21:48:24 crc kubenswrapper[5014]: I1006 21:48:24.322054 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb-kube-api-access-gkmm7" (OuterVolumeSpecName: "kube-api-access-gkmm7") pod "c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb" (UID: "c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb"). InnerVolumeSpecName "kube-api-access-gkmm7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:24 crc kubenswrapper[5014]: I1006 21:48:24.322794 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34c1300b-0bf2-4bb3-af1b-7fb7f33182d2-kube-api-access-vvpbt" (OuterVolumeSpecName: "kube-api-access-vvpbt") pod "34c1300b-0bf2-4bb3-af1b-7fb7f33182d2" (UID: "34c1300b-0bf2-4bb3-af1b-7fb7f33182d2"). InnerVolumeSpecName "kube-api-access-vvpbt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:24 crc kubenswrapper[5014]: I1006 21:48:24.417979 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkmm7\" (UniqueName: \"kubernetes.io/projected/c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb-kube-api-access-gkmm7\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:24 crc kubenswrapper[5014]: I1006 21:48:24.418017 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvpbt\" (UniqueName: \"kubernetes.io/projected/34c1300b-0bf2-4bb3-af1b-7fb7f33182d2-kube-api-access-vvpbt\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:24 crc kubenswrapper[5014]: I1006 21:48:24.806948 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-pskhp" event={"ID":"c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb","Type":"ContainerDied","Data":"9b33982674f1d457ecb5a5d45079e3aca0542ff2f10c0adac9c78841b061a386"} Oct 06 21:48:24 crc kubenswrapper[5014]: I1006 21:48:24.806975 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-pskhp" Oct 06 21:48:24 crc kubenswrapper[5014]: I1006 21:48:24.806994 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9b33982674f1d457ecb5a5d45079e3aca0542ff2f10c0adac9c78841b061a386" Oct 06 21:48:24 crc kubenswrapper[5014]: I1006 21:48:24.809263 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-gc874" event={"ID":"34c1300b-0bf2-4bb3-af1b-7fb7f33182d2","Type":"ContainerDied","Data":"177a3e069280836d69b52f89802ef15f2c328426c83053eea8a77fae3164c72f"} Oct 06 21:48:24 crc kubenswrapper[5014]: I1006 21:48:24.809290 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="177a3e069280836d69b52f89802ef15f2c328426c83053eea8a77fae3164c72f" Oct 06 21:48:24 crc kubenswrapper[5014]: I1006 21:48:24.809373 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-gc874" Oct 06 21:48:25 crc kubenswrapper[5014]: I1006 21:48:25.426050 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-6d2b-account-create-dn55l"] Oct 06 21:48:25 crc kubenswrapper[5014]: E1006 21:48:25.426473 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb" containerName="mariadb-database-create" Oct 06 21:48:25 crc kubenswrapper[5014]: I1006 21:48:25.426490 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb" containerName="mariadb-database-create" Oct 06 21:48:25 crc kubenswrapper[5014]: E1006 21:48:25.426507 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9786c139-7d1f-420a-a601-fa3f046f7bc0" containerName="dnsmasq-dns" Oct 06 21:48:25 crc kubenswrapper[5014]: I1006 21:48:25.426515 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9786c139-7d1f-420a-a601-fa3f046f7bc0" containerName="dnsmasq-dns" Oct 06 21:48:25 crc kubenswrapper[5014]: E1006 21:48:25.426525 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9786c139-7d1f-420a-a601-fa3f046f7bc0" containerName="init" Oct 06 21:48:25 crc kubenswrapper[5014]: I1006 21:48:25.426533 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9786c139-7d1f-420a-a601-fa3f046f7bc0" containerName="init" Oct 06 21:48:25 crc kubenswrapper[5014]: E1006 21:48:25.426551 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34c1300b-0bf2-4bb3-af1b-7fb7f33182d2" containerName="mariadb-database-create" Oct 06 21:48:25 crc kubenswrapper[5014]: I1006 21:48:25.426558 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="34c1300b-0bf2-4bb3-af1b-7fb7f33182d2" containerName="mariadb-database-create" Oct 06 21:48:25 crc kubenswrapper[5014]: I1006 21:48:25.426770 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="34c1300b-0bf2-4bb3-af1b-7fb7f33182d2" containerName="mariadb-database-create" Oct 06 21:48:25 crc kubenswrapper[5014]: I1006 21:48:25.426882 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="9786c139-7d1f-420a-a601-fa3f046f7bc0" containerName="dnsmasq-dns" Oct 06 21:48:25 crc kubenswrapper[5014]: I1006 21:48:25.426904 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb" containerName="mariadb-database-create" Oct 06 21:48:25 crc kubenswrapper[5014]: I1006 21:48:25.427554 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-6d2b-account-create-dn55l" Oct 06 21:48:25 crc kubenswrapper[5014]: I1006 21:48:25.430886 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Oct 06 21:48:25 crc kubenswrapper[5014]: I1006 21:48:25.450496 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-6d2b-account-create-dn55l"] Oct 06 21:48:25 crc kubenswrapper[5014]: I1006 21:48:25.548006 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-966rr\" (UniqueName: \"kubernetes.io/projected/93196faa-4f5f-4f54-82de-89cad407be89-kube-api-access-966rr\") pod \"glance-6d2b-account-create-dn55l\" (UID: \"93196faa-4f5f-4f54-82de-89cad407be89\") " pod="openstack/glance-6d2b-account-create-dn55l" Oct 06 21:48:25 crc kubenswrapper[5014]: I1006 21:48:25.650142 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-966rr\" (UniqueName: \"kubernetes.io/projected/93196faa-4f5f-4f54-82de-89cad407be89-kube-api-access-966rr\") pod \"glance-6d2b-account-create-dn55l\" (UID: \"93196faa-4f5f-4f54-82de-89cad407be89\") " pod="openstack/glance-6d2b-account-create-dn55l" Oct 06 21:48:25 crc kubenswrapper[5014]: I1006 21:48:25.673216 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-966rr\" (UniqueName: \"kubernetes.io/projected/93196faa-4f5f-4f54-82de-89cad407be89-kube-api-access-966rr\") pod \"glance-6d2b-account-create-dn55l\" (UID: \"93196faa-4f5f-4f54-82de-89cad407be89\") " pod="openstack/glance-6d2b-account-create-dn55l" Oct 06 21:48:25 crc kubenswrapper[5014]: I1006 21:48:25.754929 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-6d2b-account-create-dn55l" Oct 06 21:48:26 crc kubenswrapper[5014]: I1006 21:48:26.236297 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-6d2b-account-create-dn55l"] Oct 06 21:48:26 crc kubenswrapper[5014]: W1006 21:48:26.238594 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod93196faa_4f5f_4f54_82de_89cad407be89.slice/crio-aa19df7816b015cfe6a7473da612e814bfb363894748a3e1481ce4e5bb949573 WatchSource:0}: Error finding container aa19df7816b015cfe6a7473da612e814bfb363894748a3e1481ce4e5bb949573: Status 404 returned error can't find the container with id aa19df7816b015cfe6a7473da612e814bfb363894748a3e1481ce4e5bb949573 Oct 06 21:48:26 crc kubenswrapper[5014]: I1006 21:48:26.839787 5014 generic.go:334] "Generic (PLEG): container finished" podID="93196faa-4f5f-4f54-82de-89cad407be89" containerID="bf464222d39ef12e38a56f26482daae9287d8c9042a0d329a039052b127e923d" exitCode=0 Oct 06 21:48:26 crc kubenswrapper[5014]: I1006 21:48:26.839838 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6d2b-account-create-dn55l" event={"ID":"93196faa-4f5f-4f54-82de-89cad407be89","Type":"ContainerDied","Data":"bf464222d39ef12e38a56f26482daae9287d8c9042a0d329a039052b127e923d"} Oct 06 21:48:26 crc kubenswrapper[5014]: I1006 21:48:26.839865 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6d2b-account-create-dn55l" event={"ID":"93196faa-4f5f-4f54-82de-89cad407be89","Type":"ContainerStarted","Data":"aa19df7816b015cfe6a7473da612e814bfb363894748a3e1481ce4e5bb949573"} Oct 06 21:48:28 crc kubenswrapper[5014]: I1006 21:48:28.211935 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-6d2b-account-create-dn55l" Oct 06 21:48:28 crc kubenswrapper[5014]: I1006 21:48:28.400273 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-966rr\" (UniqueName: \"kubernetes.io/projected/93196faa-4f5f-4f54-82de-89cad407be89-kube-api-access-966rr\") pod \"93196faa-4f5f-4f54-82de-89cad407be89\" (UID: \"93196faa-4f5f-4f54-82de-89cad407be89\") " Oct 06 21:48:28 crc kubenswrapper[5014]: I1006 21:48:28.408911 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93196faa-4f5f-4f54-82de-89cad407be89-kube-api-access-966rr" (OuterVolumeSpecName: "kube-api-access-966rr") pod "93196faa-4f5f-4f54-82de-89cad407be89" (UID: "93196faa-4f5f-4f54-82de-89cad407be89"). InnerVolumeSpecName "kube-api-access-966rr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:28 crc kubenswrapper[5014]: I1006 21:48:28.504204 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-966rr\" (UniqueName: \"kubernetes.io/projected/93196faa-4f5f-4f54-82de-89cad407be89-kube-api-access-966rr\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:28 crc kubenswrapper[5014]: I1006 21:48:28.810073 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:28 crc kubenswrapper[5014]: I1006 21:48:28.818283 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift\") pod \"swift-storage-0\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " pod="openstack/swift-storage-0" Oct 06 21:48:28 crc kubenswrapper[5014]: I1006 21:48:28.860426 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-6d2b-account-create-dn55l" Oct 06 21:48:28 crc kubenswrapper[5014]: I1006 21:48:28.860447 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6d2b-account-create-dn55l" event={"ID":"93196faa-4f5f-4f54-82de-89cad407be89","Type":"ContainerDied","Data":"aa19df7816b015cfe6a7473da612e814bfb363894748a3e1481ce4e5bb949573"} Oct 06 21:48:28 crc kubenswrapper[5014]: I1006 21:48:28.860505 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aa19df7816b015cfe6a7473da612e814bfb363894748a3e1481ce4e5bb949573" Oct 06 21:48:28 crc kubenswrapper[5014]: I1006 21:48:28.863463 5014 generic.go:334] "Generic (PLEG): container finished" podID="2b292ee8-d31c-4e73-80e9-ccc915aeb406" containerID="59c2238484a6521933fa9031cd9b5dcc70ad7ec740e5a0296966fba0fa91301e" exitCode=0 Oct 06 21:48:28 crc kubenswrapper[5014]: I1006 21:48:28.863547 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-lcqfw" event={"ID":"2b292ee8-d31c-4e73-80e9-ccc915aeb406","Type":"ContainerDied","Data":"59c2238484a6521933fa9031cd9b5dcc70ad7ec740e5a0296966fba0fa91301e"} Oct 06 21:48:29 crc kubenswrapper[5014]: I1006 21:48:29.082967 5014 util.go:30] "No sandbox for pod can be found. 
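---- annotation (not part of the log) ----
At 21:48:28 the etc-swift story resolves: the swift-ring-rebalance container (59c2238…) exits 0, the swift-ring-files ConfigMap now exists, and the next retry logs "MountVolume.SetUp succeeded" (above), after which swift-storage-0's sandbox starts below. A hedged client-go sketch of polling for that ConfigMap — effectively the condition the kubelet's backoff loop was waiting on; the kubeconfig path is a placeholder assumption:

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Poll until openstack/swift-ring-files exists.
    	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // assumed path
    	if err != nil {
    		panic(err)
    	}
    	cs, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		panic(err)
    	}
    	for {
    		_, err := cs.CoreV1().ConfigMaps("openstack").Get(
    			context.TODO(), "swift-ring-files", metav1.GetOptions{})
    		if err == nil {
    			fmt.Println("swift-ring-files present; etc-swift mounts can succeed")
    			return
    		}
    		fmt.Println("not yet:", err)
    		time.Sleep(4 * time.Second)
    	}
    }

---- end annotation ----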
Need to start a new one" pod="openstack/swift-storage-0" Oct 06 21:48:29 crc kubenswrapper[5014]: I1006 21:48:29.666821 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Oct 06 21:48:29 crc kubenswrapper[5014]: I1006 21:48:29.872426 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"388c1806671b0c63c0b894715eca2c4682ab7a227f6b8b059963f0fa1623aeb4"} Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.191385 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.334291 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-combined-ca-bundle\") pod \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.334357 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-dispersionconf\") pod \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.334420 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2b292ee8-d31c-4e73-80e9-ccc915aeb406-etc-swift\") pod \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.334439 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-swiftconf\") pod \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.334461 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2b292ee8-d31c-4e73-80e9-ccc915aeb406-scripts\") pod \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.334499 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2b292ee8-d31c-4e73-80e9-ccc915aeb406-ring-data-devices\") pod \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.334531 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xdr2\" (UniqueName: \"kubernetes.io/projected/2b292ee8-d31c-4e73-80e9-ccc915aeb406-kube-api-access-8xdr2\") pod \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\" (UID: \"2b292ee8-d31c-4e73-80e9-ccc915aeb406\") " Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.335395 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b292ee8-d31c-4e73-80e9-ccc915aeb406-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "2b292ee8-d31c-4e73-80e9-ccc915aeb406" (UID: "2b292ee8-d31c-4e73-80e9-ccc915aeb406"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.335512 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b292ee8-d31c-4e73-80e9-ccc915aeb406-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "2b292ee8-d31c-4e73-80e9-ccc915aeb406" (UID: "2b292ee8-d31c-4e73-80e9-ccc915aeb406"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.345676 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b292ee8-d31c-4e73-80e9-ccc915aeb406-kube-api-access-8xdr2" (OuterVolumeSpecName: "kube-api-access-8xdr2") pod "2b292ee8-d31c-4e73-80e9-ccc915aeb406" (UID: "2b292ee8-d31c-4e73-80e9-ccc915aeb406"). InnerVolumeSpecName "kube-api-access-8xdr2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.373839 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "2b292ee8-d31c-4e73-80e9-ccc915aeb406" (UID: "2b292ee8-d31c-4e73-80e9-ccc915aeb406"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.391786 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "2b292ee8-d31c-4e73-80e9-ccc915aeb406" (UID: "2b292ee8-d31c-4e73-80e9-ccc915aeb406"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.424385 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2b292ee8-d31c-4e73-80e9-ccc915aeb406" (UID: "2b292ee8-d31c-4e73-80e9-ccc915aeb406"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.436982 5014 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2b292ee8-d31c-4e73-80e9-ccc915aeb406-ring-data-devices\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.437019 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xdr2\" (UniqueName: \"kubernetes.io/projected/2b292ee8-d31c-4e73-80e9-ccc915aeb406-kube-api-access-8xdr2\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.437032 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.437043 5014 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-dispersionconf\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.437052 5014 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2b292ee8-d31c-4e73-80e9-ccc915aeb406-etc-swift\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.437060 5014 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2b292ee8-d31c-4e73-80e9-ccc915aeb406-swiftconf\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.438820 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b292ee8-d31c-4e73-80e9-ccc915aeb406-scripts" (OuterVolumeSpecName: "scripts") pod "2b292ee8-d31c-4e73-80e9-ccc915aeb406" (UID: "2b292ee8-d31c-4e73-80e9-ccc915aeb406"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.539693 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2b292ee8-d31c-4e73-80e9-ccc915aeb406-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.572887 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-jc6q5"] Oct 06 21:48:30 crc kubenswrapper[5014]: E1006 21:48:30.573305 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93196faa-4f5f-4f54-82de-89cad407be89" containerName="mariadb-account-create" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.573354 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="93196faa-4f5f-4f54-82de-89cad407be89" containerName="mariadb-account-create" Oct 06 21:48:30 crc kubenswrapper[5014]: E1006 21:48:30.573372 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b292ee8-d31c-4e73-80e9-ccc915aeb406" containerName="swift-ring-rebalance" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.573379 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b292ee8-d31c-4e73-80e9-ccc915aeb406" containerName="swift-ring-rebalance" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.573651 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b292ee8-d31c-4e73-80e9-ccc915aeb406" containerName="swift-ring-rebalance" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.573672 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="93196faa-4f5f-4f54-82de-89cad407be89" containerName="mariadb-account-create" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.574683 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.579723 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.579917 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-wr8xs" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.586920 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-jc6q5"] Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.743819 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-config-data\") pod \"glance-db-sync-jc6q5\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.744170 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-combined-ca-bundle\") pod \"glance-db-sync-jc6q5\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.744214 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-db-sync-config-data\") pod \"glance-db-sync-jc6q5\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.744273 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gr2kt\" (UniqueName: \"kubernetes.io/projected/2a133d28-39e7-4768-83c0-9b59bef04241-kube-api-access-gr2kt\") pod \"glance-db-sync-jc6q5\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.808672 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-f4vpp" podUID="74db136d-3445-4a7e-bcae-4645888ec806" containerName="ovn-controller" probeResult="failure" output=< Oct 06 21:48:30 crc kubenswrapper[5014]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Oct 06 21:48:30 crc kubenswrapper[5014]: > Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.845668 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-config-data\") pod \"glance-db-sync-jc6q5\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.845718 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-combined-ca-bundle\") pod \"glance-db-sync-jc6q5\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.845758 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-db-sync-config-data\") pod \"glance-db-sync-jc6q5\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.845815 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gr2kt\" (UniqueName: \"kubernetes.io/projected/2a133d28-39e7-4768-83c0-9b59bef04241-kube-api-access-gr2kt\") pod \"glance-db-sync-jc6q5\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.851949 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-config-data\") pod \"glance-db-sync-jc6q5\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.855050 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.857920 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-fwbdt" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.861497 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-combined-ca-bundle\") pod \"glance-db-sync-jc6q5\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.862045 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-db-sync-config-data\") pod \"glance-db-sync-jc6q5\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.884448 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gr2kt\" (UniqueName: \"kubernetes.io/projected/2a133d28-39e7-4768-83c0-9b59bef04241-kube-api-access-gr2kt\") pod \"glance-db-sync-jc6q5\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.887606 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-lcqfw" event={"ID":"2b292ee8-d31c-4e73-80e9-ccc915aeb406","Type":"ContainerDied","Data":"0746398e2638240d943445f81990aa42e520e1a60e48c8bef9cf1ada0bdab7a8"} Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.887656 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0746398e2638240d943445f81990aa42e520e1a60e48c8bef9cf1ada0bdab7a8" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.887706 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-lcqfw" Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.891990 5014 generic.go:334] "Generic (PLEG): container finished" podID="4b977fc8-6c11-41e6-9500-f0da2d66aea1" containerID="166b1c2d8ef910ca7a9787911a267a186c4cd418c36eafd7d791b96769ac9953" exitCode=0 Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.892043 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4b977fc8-6c11-41e6-9500-f0da2d66aea1","Type":"ContainerDied","Data":"166b1c2d8ef910ca7a9787911a267a186c4cd418c36eafd7d791b96769ac9953"} Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.897346 5014 generic.go:334] "Generic (PLEG): container finished" podID="c8f59d7d-f71b-46b0-bd32-476a2517a3b6" containerID="09d7c2ebf6323f6087d0027d356d50b1abd8029b6b521b27fd0322faa1293a5d" exitCode=0 Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.897511 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c8f59d7d-f71b-46b0-bd32-476a2517a3b6","Type":"ContainerDied","Data":"09d7c2ebf6323f6087d0027d356d50b1abd8029b6b521b27fd0322faa1293a5d"} Oct 06 21:48:30 crc kubenswrapper[5014]: I1006 21:48:30.931089 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.080103 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-f4vpp-config-l7bc4"] Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.081435 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.085698 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.095424 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-f4vpp-config-l7bc4"] Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.253185 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-run-ovn\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.253237 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6348531f-8d26-409a-91cd-79e2793fc723-scripts\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.253269 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8hkv\" (UniqueName: \"kubernetes.io/projected/6348531f-8d26-409a-91cd-79e2793fc723-kube-api-access-x8hkv\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.253379 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: 
\"kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-log-ovn\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.253426 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/6348531f-8d26-409a-91cd-79e2793fc723-additional-scripts\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.253449 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-run\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.355725 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/6348531f-8d26-409a-91cd-79e2793fc723-additional-scripts\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.355782 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-run\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.355914 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-run-ovn\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.355943 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6348531f-8d26-409a-91cd-79e2793fc723-scripts\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.355972 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8hkv\" (UniqueName: \"kubernetes.io/projected/6348531f-8d26-409a-91cd-79e2793fc723-kube-api-access-x8hkv\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.356002 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-log-ovn\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.356252 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-log-ovn\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.356327 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-run\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.356257 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-run-ovn\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.356424 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/6348531f-8d26-409a-91cd-79e2793fc723-additional-scripts\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.358120 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6348531f-8d26-409a-91cd-79e2793fc723-scripts\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.378450 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8hkv\" (UniqueName: \"kubernetes.io/projected/6348531f-8d26-409a-91cd-79e2793fc723-kube-api-access-x8hkv\") pod \"ovn-controller-f4vpp-config-l7bc4\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.399596 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-f4vpp-config-l7bc4"
Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.913026 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4b977fc8-6c11-41e6-9500-f0da2d66aea1","Type":"ContainerStarted","Data":"5d8ae003e8dc923a3ad6afd87e268712900c1d33e130493581a8045c5495deed"}
Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.913939 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.915261 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"c89697b2f7e511e4d98946d9a1e15c87f63523c323ed01382b801bdcf2a5fd0e"}
Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.915301 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"6c9c5b52c3ece4980e0a040effefe3ca51032003d2dec58743e81e14eff2bd08"}
Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.918867 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c8f59d7d-f71b-46b0-bd32-476a2517a3b6","Type":"ContainerStarted","Data":"eb4d1ac3e92d3cfcfc09e4936b90190475ab22e4925f8ce4f363a59470abfbe5"}
Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.919332 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.939580 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=49.421567334 podStartE2EDuration="56.939559891s" podCreationTimestamp="2025-10-06 21:47:35 +0000 UTC" firstStartedPulling="2025-10-06 21:47:49.186913645 +0000 UTC m=+1014.479950379" lastFinishedPulling="2025-10-06 21:47:56.704906202 +0000 UTC m=+1021.997942936" observedRunningTime="2025-10-06 21:48:31.937399293 +0000 UTC m=+1057.230436027" watchObservedRunningTime="2025-10-06 21:48:31.939559891 +0000 UTC m=+1057.232596625"
Oct 06 21:48:31 crc kubenswrapper[5014]: I1006 21:48:31.977426 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=49.585750796 podStartE2EDuration="56.977406633s" podCreationTimestamp="2025-10-06 21:47:35 +0000 UTC" firstStartedPulling="2025-10-06 21:47:49.18709424 +0000 UTC m=+1014.480130964" lastFinishedPulling="2025-10-06 21:47:56.578750067 +0000 UTC m=+1021.871786801" observedRunningTime="2025-10-06 21:48:31.974676826 +0000 UTC m=+1057.267713560" watchObservedRunningTime="2025-10-06 21:48:31.977406633 +0000 UTC m=+1057.270443367"
Oct 06 21:48:32 crc kubenswrapper[5014]: I1006 21:48:32.020301 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-f4vpp-config-l7bc4"]
Oct 06 21:48:32 crc kubenswrapper[5014]: I1006 21:48:32.029420 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-jc6q5"]
Oct 06 21:48:32 crc kubenswrapper[5014]: W1006 21:48:32.030687 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6348531f_8d26_409a_91cd_79e2793fc723.slice/crio-c590535f5cfd4b72b7f69424484cc3923b80cf9f3efcbc3d98038965c5d03433 WatchSource:0}: Error finding container c590535f5cfd4b72b7f69424484cc3923b80cf9f3efcbc3d98038965c5d03433: Status 404 returned error can't find the container with id c590535f5cfd4b72b7f69424484cc3923b80cf9f3efcbc3d98038965c5d03433
Oct 06 21:48:32 crc kubenswrapper[5014]: W1006 21:48:32.034049 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a133d28_39e7_4768_83c0_9b59bef04241.slice/crio-58b52c3ee905a320064b8092d7c9e5e7fd85fdc658b0d5889dfa424dd651f9b6 WatchSource:0}: Error finding container 58b52c3ee905a320064b8092d7c9e5e7fd85fdc658b0d5889dfa424dd651f9b6: Status 404 returned error can't find the container with id 58b52c3ee905a320064b8092d7c9e5e7fd85fdc658b0d5889dfa424dd651f9b6
Oct 06 21:48:32 crc kubenswrapper[5014]: I1006 21:48:32.938527 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"cd46a133abb2d0da04ee1bdf5e939ea29e0a1f05c5d2709ffb6fe66c7656b025"}
Oct 06 21:48:32 crc kubenswrapper[5014]: I1006 21:48:32.939748 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"9721677542abcd800ca6abd93a96aae1a47615326a5084a4d6243c91345c2fac"}
Oct 06 21:48:32 crc kubenswrapper[5014]: I1006 21:48:32.941788 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jc6q5" event={"ID":"2a133d28-39e7-4768-83c0-9b59bef04241","Type":"ContainerStarted","Data":"58b52c3ee905a320064b8092d7c9e5e7fd85fdc658b0d5889dfa424dd651f9b6"}
Oct 06 21:48:32 crc kubenswrapper[5014]: I1006 21:48:32.944231 5014 generic.go:334] "Generic (PLEG): container finished" podID="6348531f-8d26-409a-91cd-79e2793fc723" containerID="1959b77a570f91cdaf5697d5f0178928de8cdf98024f0dde0600e72f93b3a946" exitCode=0
Oct 06 21:48:32 crc kubenswrapper[5014]: I1006 21:48:32.945909 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f4vpp-config-l7bc4" event={"ID":"6348531f-8d26-409a-91cd-79e2793fc723","Type":"ContainerDied","Data":"1959b77a570f91cdaf5697d5f0178928de8cdf98024f0dde0600e72f93b3a946"}
Oct 06 21:48:32 crc kubenswrapper[5014]: I1006 21:48:32.945976 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f4vpp-config-l7bc4" event={"ID":"6348531f-8d26-409a-91cd-79e2793fc723","Type":"ContainerStarted","Data":"c590535f5cfd4b72b7f69424484cc3923b80cf9f3efcbc3d98038965c5d03433"}
Oct 06 21:48:33 crc kubenswrapper[5014]: I1006 21:48:33.957645 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"7cac454e8ca5e7af86acbea681e349fae219a72762c7c9b2d920802f4e900488"}
Oct 06 21:48:33 crc kubenswrapper[5014]: I1006 21:48:33.959147 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"d474f69bb53df946b7ca116226cc152b798653cd694c8d2e13e91ca35d97a083"}
Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.319035 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-f4vpp-config-l7bc4" Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.413997 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-log-ovn\") pod \"6348531f-8d26-409a-91cd-79e2793fc723\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.414088 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "6348531f-8d26-409a-91cd-79e2793fc723" (UID: "6348531f-8d26-409a-91cd-79e2793fc723"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.414313 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/6348531f-8d26-409a-91cd-79e2793fc723-additional-scripts\") pod \"6348531f-8d26-409a-91cd-79e2793fc723\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.414383 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-run-ovn\") pod \"6348531f-8d26-409a-91cd-79e2793fc723\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.414422 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6348531f-8d26-409a-91cd-79e2793fc723-scripts\") pod \"6348531f-8d26-409a-91cd-79e2793fc723\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.414497 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8hkv\" (UniqueName: \"kubernetes.io/projected/6348531f-8d26-409a-91cd-79e2793fc723-kube-api-access-x8hkv\") pod \"6348531f-8d26-409a-91cd-79e2793fc723\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.414548 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-run\") pod \"6348531f-8d26-409a-91cd-79e2793fc723\" (UID: \"6348531f-8d26-409a-91cd-79e2793fc723\") " Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.415245 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "6348531f-8d26-409a-91cd-79e2793fc723" (UID: "6348531f-8d26-409a-91cd-79e2793fc723"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.415374 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-run" (OuterVolumeSpecName: "var-run") pod "6348531f-8d26-409a-91cd-79e2793fc723" (UID: "6348531f-8d26-409a-91cd-79e2793fc723"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.417479 5014 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-run\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.417510 5014 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-log-ovn\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.417523 5014 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6348531f-8d26-409a-91cd-79e2793fc723-var-run-ovn\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.417861 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6348531f-8d26-409a-91cd-79e2793fc723-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "6348531f-8d26-409a-91cd-79e2793fc723" (UID: "6348531f-8d26-409a-91cd-79e2793fc723"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.418147 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6348531f-8d26-409a-91cd-79e2793fc723-scripts" (OuterVolumeSpecName: "scripts") pod "6348531f-8d26-409a-91cd-79e2793fc723" (UID: "6348531f-8d26-409a-91cd-79e2793fc723"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.423185 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6348531f-8d26-409a-91cd-79e2793fc723-kube-api-access-x8hkv" (OuterVolumeSpecName: "kube-api-access-x8hkv") pod "6348531f-8d26-409a-91cd-79e2793fc723" (UID: "6348531f-8d26-409a-91cd-79e2793fc723"). InnerVolumeSpecName "kube-api-access-x8hkv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.518956 5014 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/6348531f-8d26-409a-91cd-79e2793fc723-additional-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.518990 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6348531f-8d26-409a-91cd-79e2793fc723-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.519001 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8hkv\" (UniqueName: \"kubernetes.io/projected/6348531f-8d26-409a-91cd-79e2793fc723-kube-api-access-x8hkv\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.971506 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"1f314e90fabf2871f624de1f83d91f5c7a575db546028192458bff82b490aaf1"} Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.971551 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"2493907335e39e153ca0be098c83d4742c5eba72021fb33f5ee6878e68153c1e"} Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.975226 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f4vpp-config-l7bc4" event={"ID":"6348531f-8d26-409a-91cd-79e2793fc723","Type":"ContainerDied","Data":"c590535f5cfd4b72b7f69424484cc3923b80cf9f3efcbc3d98038965c5d03433"} Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.975273 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c590535f5cfd4b72b7f69424484cc3923b80cf9f3efcbc3d98038965c5d03433" Oct 06 21:48:34 crc kubenswrapper[5014]: I1006 21:48:34.975321 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-f4vpp-config-l7bc4"
Oct 06 21:48:35 crc kubenswrapper[5014]: I1006 21:48:35.421972 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-f4vpp-config-l7bc4"]
Oct 06 21:48:35 crc kubenswrapper[5014]: I1006 21:48:35.432336 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-f4vpp-config-l7bc4"]
Oct 06 21:48:35 crc kubenswrapper[5014]: I1006 21:48:35.499359 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6348531f-8d26-409a-91cd-79e2793fc723" path="/var/lib/kubelet/pods/6348531f-8d26-409a-91cd-79e2793fc723/volumes"
Oct 06 21:48:35 crc kubenswrapper[5014]: I1006 21:48:35.831817 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-f4vpp"
Oct 06 21:48:35 crc kubenswrapper[5014]: I1006 21:48:35.990648 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"8c6ee51bcb2aa47b868de1b54eca4d9f60363bb30fa19275e968859b3dd6b211"}
Oct 06 21:48:35 crc kubenswrapper[5014]: I1006 21:48:35.990687 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"f82e937ea9ed292c5b6b2e0ed118bb02c000e888acec699801c0aef2ac147660"}
Oct 06 21:48:35 crc kubenswrapper[5014]: I1006 21:48:35.990697 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"27a6ef41e95125c0eb0ca5c6f7e55fe49acb2d2f70f72c3b31eab3c0fb6cd5e9"}
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.007922 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"794ec9c1ac30a70a53bd70e3b117f5a1d66ee9fd5b291b3742bca6c5ce899fad"}
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.008162 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"46a2145bb6c9090f1616d68d05a57ae9daf788137e9fef57af9e989e14a6f0ac"}
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.008195 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"706435a61f829966a3dd81b83578483182dd823c97c44d790d748dae50b96405"}
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.008203 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerStarted","Data":"32fd41e8d0ac65e1df8af741b277995fce5511e9e71104405b7ef81473bcf2a0"}
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.044099 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=20.359936953 podStartE2EDuration="26.044079521s" podCreationTimestamp="2025-10-06 21:48:11 +0000 UTC" firstStartedPulling="2025-10-06 21:48:29.664363812 +0000 UTC m=+1054.957400566" lastFinishedPulling="2025-10-06 21:48:35.3485064 +0000 UTC m=+1060.641543134" observedRunningTime="2025-10-06 21:48:37.040929071 +0000 UTC m=+1062.333965815" watchObservedRunningTime="2025-10-06 21:48:37.044079521 +0000 UTC m=+1062.337116255"
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.332044 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-59f45f6cf7-cmxts"]
Oct 06 21:48:37 crc kubenswrapper[5014]: E1006 21:48:37.333060 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6348531f-8d26-409a-91cd-79e2793fc723" containerName="ovn-config"
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.333081 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="6348531f-8d26-409a-91cd-79e2793fc723" containerName="ovn-config"
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.333574 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="6348531f-8d26-409a-91cd-79e2793fc723" containerName="ovn-config"
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.335463 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts"
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.343702 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0"
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.358263 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59f45f6cf7-cmxts"]
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.471500 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-ovsdbserver-sb\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts"
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.471586 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-config\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts"
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.471771 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-dns-swift-storage-0\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts"
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.471954 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-ovsdbserver-nb\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts"
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.472076 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tgb7h\" (UniqueName: \"kubernetes.io/projected/0eea5313-8d2a-4066-9896-15ed6ada8e4a-kube-api-access-tgb7h\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts"
Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.472130 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-dns-svc\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.573930 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-dns-svc\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.573986 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-ovsdbserver-sb\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.574027 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-config\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.574066 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-dns-swift-storage-0\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.574943 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-config\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.575002 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-ovsdbserver-nb\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.575030 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-dns-swift-storage-0\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.575098 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tgb7h\" (UniqueName: \"kubernetes.io/projected/0eea5313-8d2a-4066-9896-15ed6ada8e4a-kube-api-access-tgb7h\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.575216 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-ovsdbserver-sb\") pod 
\"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.575584 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-ovsdbserver-nb\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.576761 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-dns-svc\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.594275 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tgb7h\" (UniqueName: \"kubernetes.io/projected/0eea5313-8d2a-4066-9896-15ed6ada8e4a-kube-api-access-tgb7h\") pod \"dnsmasq-dns-59f45f6cf7-cmxts\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:37 crc kubenswrapper[5014]: I1006 21:48:37.670051 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:39 crc kubenswrapper[5014]: I1006 21:48:39.742304 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7147-account-create-q2lpp"] Oct 06 21:48:39 crc kubenswrapper[5014]: I1006 21:48:39.744115 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7147-account-create-q2lpp" Oct 06 21:48:39 crc kubenswrapper[5014]: I1006 21:48:39.745920 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Oct 06 21:48:39 crc kubenswrapper[5014]: I1006 21:48:39.753256 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7147-account-create-q2lpp"] Oct 06 21:48:39 crc kubenswrapper[5014]: I1006 21:48:39.813165 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5vmq\" (UniqueName: \"kubernetes.io/projected/862503fc-2bba-487c-aed2-83403621b99b-kube-api-access-z5vmq\") pod \"keystone-7147-account-create-q2lpp\" (UID: \"862503fc-2bba-487c-aed2-83403621b99b\") " pod="openstack/keystone-7147-account-create-q2lpp" Oct 06 21:48:39 crc kubenswrapper[5014]: I1006 21:48:39.914766 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5vmq\" (UniqueName: \"kubernetes.io/projected/862503fc-2bba-487c-aed2-83403621b99b-kube-api-access-z5vmq\") pod \"keystone-7147-account-create-q2lpp\" (UID: \"862503fc-2bba-487c-aed2-83403621b99b\") " pod="openstack/keystone-7147-account-create-q2lpp" Oct 06 21:48:39 crc kubenswrapper[5014]: I1006 21:48:39.934236 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5vmq\" (UniqueName: \"kubernetes.io/projected/862503fc-2bba-487c-aed2-83403621b99b-kube-api-access-z5vmq\") pod \"keystone-7147-account-create-q2lpp\" (UID: \"862503fc-2bba-487c-aed2-83403621b99b\") " pod="openstack/keystone-7147-account-create-q2lpp" Oct 06 21:48:40 crc kubenswrapper[5014]: I1006 21:48:40.067134 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7147-account-create-q2lpp" Oct 06 21:48:40 crc kubenswrapper[5014]: I1006 21:48:40.117560 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-8e33-account-create-jrgt5"] Oct 06 21:48:40 crc kubenswrapper[5014]: I1006 21:48:40.119392 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-8e33-account-create-jrgt5" Oct 06 21:48:40 crc kubenswrapper[5014]: I1006 21:48:40.122442 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Oct 06 21:48:40 crc kubenswrapper[5014]: I1006 21:48:40.129687 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-8e33-account-create-jrgt5"] Oct 06 21:48:40 crc kubenswrapper[5014]: I1006 21:48:40.220921 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gm86t\" (UniqueName: \"kubernetes.io/projected/bc12c0f4-59e6-40f3-a4ae-0fa426576beb-kube-api-access-gm86t\") pod \"placement-8e33-account-create-jrgt5\" (UID: \"bc12c0f4-59e6-40f3-a4ae-0fa426576beb\") " pod="openstack/placement-8e33-account-create-jrgt5" Oct 06 21:48:40 crc kubenswrapper[5014]: I1006 21:48:40.322195 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gm86t\" (UniqueName: \"kubernetes.io/projected/bc12c0f4-59e6-40f3-a4ae-0fa426576beb-kube-api-access-gm86t\") pod \"placement-8e33-account-create-jrgt5\" (UID: \"bc12c0f4-59e6-40f3-a4ae-0fa426576beb\") " pod="openstack/placement-8e33-account-create-jrgt5" Oct 06 21:48:40 crc kubenswrapper[5014]: I1006 21:48:40.338481 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gm86t\" (UniqueName: \"kubernetes.io/projected/bc12c0f4-59e6-40f3-a4ae-0fa426576beb-kube-api-access-gm86t\") pod \"placement-8e33-account-create-jrgt5\" (UID: \"bc12c0f4-59e6-40f3-a4ae-0fa426576beb\") " pod="openstack/placement-8e33-account-create-jrgt5" Oct 06 21:48:40 crc kubenswrapper[5014]: I1006 21:48:40.445634 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-8e33-account-create-jrgt5" Oct 06 21:48:45 crc kubenswrapper[5014]: I1006 21:48:45.219327 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-8e33-account-create-jrgt5"] Oct 06 21:48:45 crc kubenswrapper[5014]: W1006 21:48:45.228802 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbc12c0f4_59e6_40f3_a4ae_0fa426576beb.slice/crio-8346931b8b809779467f6107f9190240980845bb5941505bfffcceaf5cd10fba WatchSource:0}: Error finding container 8346931b8b809779467f6107f9190240980845bb5941505bfffcceaf5cd10fba: Status 404 returned error can't find the container with id 8346931b8b809779467f6107f9190240980845bb5941505bfffcceaf5cd10fba Oct 06 21:48:45 crc kubenswrapper[5014]: W1006 21:48:45.284052 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0eea5313_8d2a_4066_9896_15ed6ada8e4a.slice/crio-161fe1edefec2c5541287e723a41958fcebe30a2d25688c7bc43da706a4f97d6 WatchSource:0}: Error finding container 161fe1edefec2c5541287e723a41958fcebe30a2d25688c7bc43da706a4f97d6: Status 404 returned error can't find the container with id 161fe1edefec2c5541287e723a41958fcebe30a2d25688c7bc43da706a4f97d6 Oct 06 21:48:45 crc kubenswrapper[5014]: I1006 21:48:45.285011 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59f45f6cf7-cmxts"] Oct 06 21:48:45 crc kubenswrapper[5014]: I1006 21:48:45.298137 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7147-account-create-q2lpp"] Oct 06 21:48:46 crc kubenswrapper[5014]: I1006 21:48:46.087829 5014 generic.go:334] "Generic (PLEG): container finished" podID="862503fc-2bba-487c-aed2-83403621b99b" containerID="f45765ebde0f9dc12d53e796e701e54e1c7970b480a74ce9768eb092f10479b4" exitCode=0 Oct 06 21:48:46 crc kubenswrapper[5014]: I1006 21:48:46.088026 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7147-account-create-q2lpp" event={"ID":"862503fc-2bba-487c-aed2-83403621b99b","Type":"ContainerDied","Data":"f45765ebde0f9dc12d53e796e701e54e1c7970b480a74ce9768eb092f10479b4"} Oct 06 21:48:46 crc kubenswrapper[5014]: I1006 21:48:46.088167 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7147-account-create-q2lpp" event={"ID":"862503fc-2bba-487c-aed2-83403621b99b","Type":"ContainerStarted","Data":"194f49de6d313559a59c8244e0cc9c388a23781552d9ea85c86e3a8298f117bb"} Oct 06 21:48:46 crc kubenswrapper[5014]: I1006 21:48:46.090491 5014 generic.go:334] "Generic (PLEG): container finished" podID="0eea5313-8d2a-4066-9896-15ed6ada8e4a" containerID="84d8b33cd4ef3659cebede0f811eb719bc8a0b8766890078700632fd9a6dca70" exitCode=0 Oct 06 21:48:46 crc kubenswrapper[5014]: I1006 21:48:46.090549 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" event={"ID":"0eea5313-8d2a-4066-9896-15ed6ada8e4a","Type":"ContainerDied","Data":"84d8b33cd4ef3659cebede0f811eb719bc8a0b8766890078700632fd9a6dca70"} Oct 06 21:48:46 crc kubenswrapper[5014]: I1006 21:48:46.090572 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" event={"ID":"0eea5313-8d2a-4066-9896-15ed6ada8e4a","Type":"ContainerStarted","Data":"161fe1edefec2c5541287e723a41958fcebe30a2d25688c7bc43da706a4f97d6"} Oct 06 21:48:46 crc kubenswrapper[5014]: I1006 21:48:46.092737 5014 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/glance-db-sync-jc6q5" event={"ID":"2a133d28-39e7-4768-83c0-9b59bef04241","Type":"ContainerStarted","Data":"1d44e59a2f427f4344ed685e43320717bbfc68cd82e91134294275a2865561e9"} Oct 06 21:48:46 crc kubenswrapper[5014]: I1006 21:48:46.097469 5014 generic.go:334] "Generic (PLEG): container finished" podID="bc12c0f4-59e6-40f3-a4ae-0fa426576beb" containerID="a0b16ec100c37d0a7dab766417c9bd17c325284f3a24595bd52dfb7036ee7536" exitCode=0 Oct 06 21:48:46 crc kubenswrapper[5014]: I1006 21:48:46.097520 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8e33-account-create-jrgt5" event={"ID":"bc12c0f4-59e6-40f3-a4ae-0fa426576beb","Type":"ContainerDied","Data":"a0b16ec100c37d0a7dab766417c9bd17c325284f3a24595bd52dfb7036ee7536"} Oct 06 21:48:46 crc kubenswrapper[5014]: I1006 21:48:46.097540 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8e33-account-create-jrgt5" event={"ID":"bc12c0f4-59e6-40f3-a4ae-0fa426576beb","Type":"ContainerStarted","Data":"8346931b8b809779467f6107f9190240980845bb5941505bfffcceaf5cd10fba"} Oct 06 21:48:46 crc kubenswrapper[5014]: I1006 21:48:46.153537 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-jc6q5" podStartSLOduration=3.291103413 podStartE2EDuration="16.153516576s" podCreationTimestamp="2025-10-06 21:48:30 +0000 UTC" firstStartedPulling="2025-10-06 21:48:32.034697852 +0000 UTC m=+1057.327734586" lastFinishedPulling="2025-10-06 21:48:44.897111015 +0000 UTC m=+1070.190147749" observedRunningTime="2025-10-06 21:48:46.151486471 +0000 UTC m=+1071.444523205" watchObservedRunningTime="2025-10-06 21:48:46.153516576 +0000 UTC m=+1071.446553320" Oct 06 21:48:46 crc kubenswrapper[5014]: I1006 21:48:46.782895 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.101859 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.110227 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" event={"ID":"0eea5313-8d2a-4066-9896-15ed6ada8e4a","Type":"ContainerStarted","Data":"f048351e68f473e6b5e9a6811cebb92cb76348c3c9a8ea09e0cf146f801628ef"} Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.139203 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-6ht9f"] Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.140790 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-6ht9f" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.167152 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-6ht9f"] Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.188352 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" podStartSLOduration=10.188338153 podStartE2EDuration="10.188338153s" podCreationTimestamp="2025-10-06 21:48:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:48:47.185817444 +0000 UTC m=+1072.478854178" watchObservedRunningTime="2025-10-06 21:48:47.188338153 +0000 UTC m=+1072.481374877" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.245355 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-s9ksw"] Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.248197 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-s9ksw" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.264061 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7k9lk\" (UniqueName: \"kubernetes.io/projected/86104dc0-8799-4cbd-bead-038f3358dfba-kube-api-access-7k9lk\") pod \"cinder-db-create-6ht9f\" (UID: \"86104dc0-8799-4cbd-bead-038f3358dfba\") " pod="openstack/cinder-db-create-6ht9f" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.272052 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-s9ksw"] Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.365477 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7k9lk\" (UniqueName: \"kubernetes.io/projected/86104dc0-8799-4cbd-bead-038f3358dfba-kube-api-access-7k9lk\") pod \"cinder-db-create-6ht9f\" (UID: \"86104dc0-8799-4cbd-bead-038f3358dfba\") " pod="openstack/cinder-db-create-6ht9f" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.365578 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkdzc\" (UniqueName: \"kubernetes.io/projected/89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c-kube-api-access-jkdzc\") pod \"barbican-db-create-s9ksw\" (UID: \"89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c\") " pod="openstack/barbican-db-create-s9ksw" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.390441 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7k9lk\" (UniqueName: \"kubernetes.io/projected/86104dc0-8799-4cbd-bead-038f3358dfba-kube-api-access-7k9lk\") pod \"cinder-db-create-6ht9f\" (UID: \"86104dc0-8799-4cbd-bead-038f3358dfba\") " pod="openstack/cinder-db-create-6ht9f" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.445959 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-p5kln"] Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.447048 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-p5kln" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.459458 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-p5kln"] Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.468587 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkdzc\" (UniqueName: \"kubernetes.io/projected/89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c-kube-api-access-jkdzc\") pod \"barbican-db-create-s9ksw\" (UID: \"89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c\") " pod="openstack/barbican-db-create-s9ksw" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.468720 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fthf4\" (UniqueName: \"kubernetes.io/projected/1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c-kube-api-access-fthf4\") pod \"neutron-db-create-p5kln\" (UID: \"1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c\") " pod="openstack/neutron-db-create-p5kln" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.477532 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6ht9f" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.494407 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkdzc\" (UniqueName: \"kubernetes.io/projected/89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c-kube-api-access-jkdzc\") pod \"barbican-db-create-s9ksw\" (UID: \"89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c\") " pod="openstack/barbican-db-create-s9ksw" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.568421 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-s9ksw" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.569645 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fthf4\" (UniqueName: \"kubernetes.io/projected/1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c-kube-api-access-fthf4\") pod \"neutron-db-create-p5kln\" (UID: \"1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c\") " pod="openstack/neutron-db-create-p5kln" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.588214 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fthf4\" (UniqueName: \"kubernetes.io/projected/1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c-kube-api-access-fthf4\") pod \"neutron-db-create-p5kln\" (UID: \"1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c\") " pod="openstack/neutron-db-create-p5kln" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.653964 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-8e33-account-create-jrgt5" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.668183 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7147-account-create-q2lpp" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.670236 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.766108 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-p5kln" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.772864 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gm86t\" (UniqueName: \"kubernetes.io/projected/bc12c0f4-59e6-40f3-a4ae-0fa426576beb-kube-api-access-gm86t\") pod \"bc12c0f4-59e6-40f3-a4ae-0fa426576beb\" (UID: \"bc12c0f4-59e6-40f3-a4ae-0fa426576beb\") " Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.773128 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5vmq\" (UniqueName: \"kubernetes.io/projected/862503fc-2bba-487c-aed2-83403621b99b-kube-api-access-z5vmq\") pod \"862503fc-2bba-487c-aed2-83403621b99b\" (UID: \"862503fc-2bba-487c-aed2-83403621b99b\") " Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.777098 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/862503fc-2bba-487c-aed2-83403621b99b-kube-api-access-z5vmq" (OuterVolumeSpecName: "kube-api-access-z5vmq") pod "862503fc-2bba-487c-aed2-83403621b99b" (UID: "862503fc-2bba-487c-aed2-83403621b99b"). InnerVolumeSpecName "kube-api-access-z5vmq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.779929 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc12c0f4-59e6-40f3-a4ae-0fa426576beb-kube-api-access-gm86t" (OuterVolumeSpecName: "kube-api-access-gm86t") pod "bc12c0f4-59e6-40f3-a4ae-0fa426576beb" (UID: "bc12c0f4-59e6-40f3-a4ae-0fa426576beb"). InnerVolumeSpecName "kube-api-access-gm86t". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.874997 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gm86t\" (UniqueName: \"kubernetes.io/projected/bc12c0f4-59e6-40f3-a4ae-0fa426576beb-kube-api-access-gm86t\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.875030 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5vmq\" (UniqueName: \"kubernetes.io/projected/862503fc-2bba-487c-aed2-83403621b99b-kube-api-access-z5vmq\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:47 crc kubenswrapper[5014]: I1006 21:48:47.968097 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-6ht9f"] Oct 06 21:48:47 crc kubenswrapper[5014]: W1006 21:48:47.972201 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod86104dc0_8799_4cbd_bead_038f3358dfba.slice/crio-f701adf45d021767cee632d7df52d9d44ca5cc0adede5c5a6824fdaa457dcefe WatchSource:0}: Error finding container f701adf45d021767cee632d7df52d9d44ca5cc0adede5c5a6824fdaa457dcefe: Status 404 returned error can't find the container with id f701adf45d021767cee632d7df52d9d44ca5cc0adede5c5a6824fdaa457dcefe Oct 06 21:48:48 crc kubenswrapper[5014]: I1006 21:48:48.095115 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-s9ksw"] Oct 06 21:48:48 crc kubenswrapper[5014]: W1006 21:48:48.102301 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod89b3ca3c_1317_4552_ac8d_d8bcc75ddf0c.slice/crio-273833a753bb56cd745120a5b179279a4e823176c00f84d25781ef4ac8e556df WatchSource:0}: Error finding container 
273833a753bb56cd745120a5b179279a4e823176c00f84d25781ef4ac8e556df: Status 404 returned error can't find the container with id 273833a753bb56cd745120a5b179279a4e823176c00f84d25781ef4ac8e556df Oct 06 21:48:48 crc kubenswrapper[5014]: I1006 21:48:48.123461 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6ht9f" event={"ID":"86104dc0-8799-4cbd-bead-038f3358dfba","Type":"ContainerStarted","Data":"034cf2f15c26b5ad5bd392b82b653d9607968a06ea6a5acdbdf60bb484c7ddfd"} Oct 06 21:48:48 crc kubenswrapper[5014]: I1006 21:48:48.123503 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6ht9f" event={"ID":"86104dc0-8799-4cbd-bead-038f3358dfba","Type":"ContainerStarted","Data":"f701adf45d021767cee632d7df52d9d44ca5cc0adede5c5a6824fdaa457dcefe"} Oct 06 21:48:48 crc kubenswrapper[5014]: I1006 21:48:48.125764 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-s9ksw" event={"ID":"89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c","Type":"ContainerStarted","Data":"273833a753bb56cd745120a5b179279a4e823176c00f84d25781ef4ac8e556df"} Oct 06 21:48:48 crc kubenswrapper[5014]: I1006 21:48:48.128175 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8e33-account-create-jrgt5" event={"ID":"bc12c0f4-59e6-40f3-a4ae-0fa426576beb","Type":"ContainerDied","Data":"8346931b8b809779467f6107f9190240980845bb5941505bfffcceaf5cd10fba"} Oct 06 21:48:48 crc kubenswrapper[5014]: I1006 21:48:48.128210 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8346931b8b809779467f6107f9190240980845bb5941505bfffcceaf5cd10fba" Oct 06 21:48:48 crc kubenswrapper[5014]: I1006 21:48:48.128265 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-8e33-account-create-jrgt5" Oct 06 21:48:48 crc kubenswrapper[5014]: I1006 21:48:48.136014 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7147-account-create-q2lpp" Oct 06 21:48:48 crc kubenswrapper[5014]: I1006 21:48:48.141532 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7147-account-create-q2lpp" event={"ID":"862503fc-2bba-487c-aed2-83403621b99b","Type":"ContainerDied","Data":"194f49de6d313559a59c8244e0cc9c388a23781552d9ea85c86e3a8298f117bb"} Oct 06 21:48:48 crc kubenswrapper[5014]: I1006 21:48:48.141595 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="194f49de6d313559a59c8244e0cc9c388a23781552d9ea85c86e3a8298f117bb" Oct 06 21:48:48 crc kubenswrapper[5014]: I1006 21:48:48.146777 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-6ht9f" podStartSLOduration=1.146751965 podStartE2EDuration="1.146751965s" podCreationTimestamp="2025-10-06 21:48:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:48:48.141667854 +0000 UTC m=+1073.434704608" watchObservedRunningTime="2025-10-06 21:48:48.146751965 +0000 UTC m=+1073.439788699" Oct 06 21:48:48 crc kubenswrapper[5014]: I1006 21:48:48.210343 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-p5kln"] Oct 06 21:48:49 crc kubenswrapper[5014]: I1006 21:48:49.160778 5014 generic.go:334] "Generic (PLEG): container finished" podID="86104dc0-8799-4cbd-bead-038f3358dfba" containerID="034cf2f15c26b5ad5bd392b82b653d9607968a06ea6a5acdbdf60bb484c7ddfd" exitCode=0 Oct 06 21:48:49 crc kubenswrapper[5014]: I1006 21:48:49.160820 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6ht9f" event={"ID":"86104dc0-8799-4cbd-bead-038f3358dfba","Type":"ContainerDied","Data":"034cf2f15c26b5ad5bd392b82b653d9607968a06ea6a5acdbdf60bb484c7ddfd"} Oct 06 21:48:49 crc kubenswrapper[5014]: I1006 21:48:49.163322 5014 generic.go:334] "Generic (PLEG): container finished" podID="1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c" containerID="03501a4428cc493bd8ed16ab19651e2cdf11e53ac3b24caf94af764503a3b0a1" exitCode=0 Oct 06 21:48:49 crc kubenswrapper[5014]: I1006 21:48:49.163375 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-p5kln" event={"ID":"1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c","Type":"ContainerDied","Data":"03501a4428cc493bd8ed16ab19651e2cdf11e53ac3b24caf94af764503a3b0a1"} Oct 06 21:48:49 crc kubenswrapper[5014]: I1006 21:48:49.163391 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-p5kln" event={"ID":"1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c","Type":"ContainerStarted","Data":"10b39fabf2e7c7c0b539530ef7530f3748208aeda2acecc20e06f0fc135147a0"} Oct 06 21:48:49 crc kubenswrapper[5014]: I1006 21:48:49.165031 5014 generic.go:334] "Generic (PLEG): container finished" podID="89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c" containerID="75260f5fd71272b8cf58b4b12873343b77691304e9712b86843406280124f279" exitCode=0 Oct 06 21:48:49 crc kubenswrapper[5014]: I1006 21:48:49.165090 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-s9ksw" event={"ID":"89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c","Type":"ContainerDied","Data":"75260f5fd71272b8cf58b4b12873343b77691304e9712b86843406280124f279"} Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.228199 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-t5wff"] Oct 06 21:48:50 crc kubenswrapper[5014]: E1006 21:48:50.228717 
5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="862503fc-2bba-487c-aed2-83403621b99b" containerName="mariadb-account-create" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.228746 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="862503fc-2bba-487c-aed2-83403621b99b" containerName="mariadb-account-create" Oct 06 21:48:50 crc kubenswrapper[5014]: E1006 21:48:50.228772 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc12c0f4-59e6-40f3-a4ae-0fa426576beb" containerName="mariadb-account-create" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.228779 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc12c0f4-59e6-40f3-a4ae-0fa426576beb" containerName="mariadb-account-create" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.228965 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc12c0f4-59e6-40f3-a4ae-0fa426576beb" containerName="mariadb-account-create" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.228984 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="862503fc-2bba-487c-aed2-83403621b99b" containerName="mariadb-account-create" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.229538 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-t5wff" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.233445 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.233652 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-xch9b" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.233469 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.234336 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.244259 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-t5wff"] Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.318328 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8069cfef-c703-4aa4-b1a4-6860fc1734db-config-data\") pod \"keystone-db-sync-t5wff\" (UID: \"8069cfef-c703-4aa4-b1a4-6860fc1734db\") " pod="openstack/keystone-db-sync-t5wff" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.318397 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8069cfef-c703-4aa4-b1a4-6860fc1734db-combined-ca-bundle\") pod \"keystone-db-sync-t5wff\" (UID: \"8069cfef-c703-4aa4-b1a4-6860fc1734db\") " pod="openstack/keystone-db-sync-t5wff" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.318423 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qmlc\" (UniqueName: \"kubernetes.io/projected/8069cfef-c703-4aa4-b1a4-6860fc1734db-kube-api-access-2qmlc\") pod \"keystone-db-sync-t5wff\" (UID: \"8069cfef-c703-4aa4-b1a4-6860fc1734db\") " pod="openstack/keystone-db-sync-t5wff" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.419878 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/8069cfef-c703-4aa4-b1a4-6860fc1734db-config-data\") pod \"keystone-db-sync-t5wff\" (UID: \"8069cfef-c703-4aa4-b1a4-6860fc1734db\") " pod="openstack/keystone-db-sync-t5wff" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.419954 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8069cfef-c703-4aa4-b1a4-6860fc1734db-combined-ca-bundle\") pod \"keystone-db-sync-t5wff\" (UID: \"8069cfef-c703-4aa4-b1a4-6860fc1734db\") " pod="openstack/keystone-db-sync-t5wff" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.419976 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qmlc\" (UniqueName: \"kubernetes.io/projected/8069cfef-c703-4aa4-b1a4-6860fc1734db-kube-api-access-2qmlc\") pod \"keystone-db-sync-t5wff\" (UID: \"8069cfef-c703-4aa4-b1a4-6860fc1734db\") " pod="openstack/keystone-db-sync-t5wff" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.446746 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8069cfef-c703-4aa4-b1a4-6860fc1734db-combined-ca-bundle\") pod \"keystone-db-sync-t5wff\" (UID: \"8069cfef-c703-4aa4-b1a4-6860fc1734db\") " pod="openstack/keystone-db-sync-t5wff" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.461565 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8069cfef-c703-4aa4-b1a4-6860fc1734db-config-data\") pod \"keystone-db-sync-t5wff\" (UID: \"8069cfef-c703-4aa4-b1a4-6860fc1734db\") " pod="openstack/keystone-db-sync-t5wff" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.461908 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qmlc\" (UniqueName: \"kubernetes.io/projected/8069cfef-c703-4aa4-b1a4-6860fc1734db-kube-api-access-2qmlc\") pod \"keystone-db-sync-t5wff\" (UID: \"8069cfef-c703-4aa4-b1a4-6860fc1734db\") " pod="openstack/keystone-db-sync-t5wff" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.547499 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-t5wff" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.650401 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-s9ksw" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.655292 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6ht9f" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.692017 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-p5kln" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.724494 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkdzc\" (UniqueName: \"kubernetes.io/projected/89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c-kube-api-access-jkdzc\") pod \"89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c\" (UID: \"89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c\") " Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.724686 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fthf4\" (UniqueName: \"kubernetes.io/projected/1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c-kube-api-access-fthf4\") pod \"1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c\" (UID: \"1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c\") " Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.724781 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7k9lk\" (UniqueName: \"kubernetes.io/projected/86104dc0-8799-4cbd-bead-038f3358dfba-kube-api-access-7k9lk\") pod \"86104dc0-8799-4cbd-bead-038f3358dfba\" (UID: \"86104dc0-8799-4cbd-bead-038f3358dfba\") " Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.728572 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c-kube-api-access-jkdzc" (OuterVolumeSpecName: "kube-api-access-jkdzc") pod "89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c" (UID: "89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c"). InnerVolumeSpecName "kube-api-access-jkdzc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.729028 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86104dc0-8799-4cbd-bead-038f3358dfba-kube-api-access-7k9lk" (OuterVolumeSpecName: "kube-api-access-7k9lk") pod "86104dc0-8799-4cbd-bead-038f3358dfba" (UID: "86104dc0-8799-4cbd-bead-038f3358dfba"). InnerVolumeSpecName "kube-api-access-7k9lk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.744071 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c-kube-api-access-fthf4" (OuterVolumeSpecName: "kube-api-access-fthf4") pod "1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c" (UID: "1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c"). InnerVolumeSpecName "kube-api-access-fthf4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.828005 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkdzc\" (UniqueName: \"kubernetes.io/projected/89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c-kube-api-access-jkdzc\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.828041 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fthf4\" (UniqueName: \"kubernetes.io/projected/1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c-kube-api-access-fthf4\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:50 crc kubenswrapper[5014]: I1006 21:48:50.828051 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7k9lk\" (UniqueName: \"kubernetes.io/projected/86104dc0-8799-4cbd-bead-038f3358dfba-kube-api-access-7k9lk\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:51 crc kubenswrapper[5014]: I1006 21:48:51.016749 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-t5wff"] Oct 06 21:48:51 crc kubenswrapper[5014]: W1006 21:48:51.024464 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8069cfef_c703_4aa4_b1a4_6860fc1734db.slice/crio-6d81ae290b18fc7bebb9de5bf3ba76648f45182e6982ec514d194afd56117990 WatchSource:0}: Error finding container 6d81ae290b18fc7bebb9de5bf3ba76648f45182e6982ec514d194afd56117990: Status 404 returned error can't find the container with id 6d81ae290b18fc7bebb9de5bf3ba76648f45182e6982ec514d194afd56117990 Oct 06 21:48:51 crc kubenswrapper[5014]: I1006 21:48:51.188073 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-p5kln" event={"ID":"1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c","Type":"ContainerDied","Data":"10b39fabf2e7c7c0b539530ef7530f3748208aeda2acecc20e06f0fc135147a0"} Oct 06 21:48:51 crc kubenswrapper[5014]: I1006 21:48:51.188111 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="10b39fabf2e7c7c0b539530ef7530f3748208aeda2acecc20e06f0fc135147a0" Oct 06 21:48:51 crc kubenswrapper[5014]: I1006 21:48:51.188168 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-p5kln" Oct 06 21:48:51 crc kubenswrapper[5014]: I1006 21:48:51.191056 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-s9ksw" event={"ID":"89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c","Type":"ContainerDied","Data":"273833a753bb56cd745120a5b179279a4e823176c00f84d25781ef4ac8e556df"} Oct 06 21:48:51 crc kubenswrapper[5014]: I1006 21:48:51.191096 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="273833a753bb56cd745120a5b179279a4e823176c00f84d25781ef4ac8e556df" Oct 06 21:48:51 crc kubenswrapper[5014]: I1006 21:48:51.191146 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-s9ksw" Oct 06 21:48:51 crc kubenswrapper[5014]: I1006 21:48:51.197274 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-t5wff" event={"ID":"8069cfef-c703-4aa4-b1a4-6860fc1734db","Type":"ContainerStarted","Data":"6d81ae290b18fc7bebb9de5bf3ba76648f45182e6982ec514d194afd56117990"} Oct 06 21:48:51 crc kubenswrapper[5014]: I1006 21:48:51.198791 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6ht9f" event={"ID":"86104dc0-8799-4cbd-bead-038f3358dfba","Type":"ContainerDied","Data":"f701adf45d021767cee632d7df52d9d44ca5cc0adede5c5a6824fdaa457dcefe"} Oct 06 21:48:51 crc kubenswrapper[5014]: I1006 21:48:51.198835 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f701adf45d021767cee632d7df52d9d44ca5cc0adede5c5a6824fdaa457dcefe" Oct 06 21:48:51 crc kubenswrapper[5014]: I1006 21:48:51.198919 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6ht9f" Oct 06 21:48:52 crc kubenswrapper[5014]: I1006 21:48:52.672887 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:48:52 crc kubenswrapper[5014]: I1006 21:48:52.730962 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57f58c7cff-sh7hf"] Oct 06 21:48:52 crc kubenswrapper[5014]: I1006 21:48:52.731181 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" podUID="3956e460-e179-438d-be81-7af1dc5fcfe8" containerName="dnsmasq-dns" containerID="cri-o://9984bfb89d550dfd25e4f5866d31fdaf0ecb771bdb962caaf7cb9f1f81dcf9ea" gracePeriod=10 Oct 06 21:48:53 crc kubenswrapper[5014]: I1006 21:48:53.216648 5014 generic.go:334] "Generic (PLEG): container finished" podID="2a133d28-39e7-4768-83c0-9b59bef04241" containerID="1d44e59a2f427f4344ed685e43320717bbfc68cd82e91134294275a2865561e9" exitCode=0 Oct 06 21:48:53 crc kubenswrapper[5014]: I1006 21:48:53.216816 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jc6q5" event={"ID":"2a133d28-39e7-4768-83c0-9b59bef04241","Type":"ContainerDied","Data":"1d44e59a2f427f4344ed685e43320717bbfc68cd82e91134294275a2865561e9"} Oct 06 21:48:53 crc kubenswrapper[5014]: I1006 21:48:53.227458 5014 generic.go:334] "Generic (PLEG): container finished" podID="3956e460-e179-438d-be81-7af1dc5fcfe8" containerID="9984bfb89d550dfd25e4f5866d31fdaf0ecb771bdb962caaf7cb9f1f81dcf9ea" exitCode=0 Oct 06 21:48:53 crc kubenswrapper[5014]: I1006 21:48:53.227512 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" event={"ID":"3956e460-e179-438d-be81-7af1dc5fcfe8","Type":"ContainerDied","Data":"9984bfb89d550dfd25e4f5866d31fdaf0ecb771bdb962caaf7cb9f1f81dcf9ea"} Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.570402 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.577378 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.734045 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-db-sync-config-data\") pod \"2a133d28-39e7-4768-83c0-9b59bef04241\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.734097 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-config-data\") pod \"2a133d28-39e7-4768-83c0-9b59bef04241\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.734201 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mzqv\" (UniqueName: \"kubernetes.io/projected/3956e460-e179-438d-be81-7af1dc5fcfe8-kube-api-access-7mzqv\") pod \"3956e460-e179-438d-be81-7af1dc5fcfe8\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.734252 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-combined-ca-bundle\") pod \"2a133d28-39e7-4768-83c0-9b59bef04241\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.734287 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-config\") pod \"3956e460-e179-438d-be81-7af1dc5fcfe8\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.734353 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-ovsdbserver-nb\") pod \"3956e460-e179-438d-be81-7af1dc5fcfe8\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.734393 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-dns-svc\") pod \"3956e460-e179-438d-be81-7af1dc5fcfe8\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.735249 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-ovsdbserver-sb\") pod \"3956e460-e179-438d-be81-7af1dc5fcfe8\" (UID: \"3956e460-e179-438d-be81-7af1dc5fcfe8\") " Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.735318 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gr2kt\" (UniqueName: \"kubernetes.io/projected/2a133d28-39e7-4768-83c0-9b59bef04241-kube-api-access-gr2kt\") pod \"2a133d28-39e7-4768-83c0-9b59bef04241\" (UID: \"2a133d28-39e7-4768-83c0-9b59bef04241\") " Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.740803 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3956e460-e179-438d-be81-7af1dc5fcfe8-kube-api-access-7mzqv" (OuterVolumeSpecName: "kube-api-access-7mzqv") pod 
"3956e460-e179-438d-be81-7af1dc5fcfe8" (UID: "3956e460-e179-438d-be81-7af1dc5fcfe8"). InnerVolumeSpecName "kube-api-access-7mzqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.741009 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "2a133d28-39e7-4768-83c0-9b59bef04241" (UID: "2a133d28-39e7-4768-83c0-9b59bef04241"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.741876 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a133d28-39e7-4768-83c0-9b59bef04241-kube-api-access-gr2kt" (OuterVolumeSpecName: "kube-api-access-gr2kt") pod "2a133d28-39e7-4768-83c0-9b59bef04241" (UID: "2a133d28-39e7-4768-83c0-9b59bef04241"). InnerVolumeSpecName "kube-api-access-gr2kt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.780451 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3956e460-e179-438d-be81-7af1dc5fcfe8" (UID: "3956e460-e179-438d-be81-7af1dc5fcfe8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.784910 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-config-data" (OuterVolumeSpecName: "config-data") pod "2a133d28-39e7-4768-83c0-9b59bef04241" (UID: "2a133d28-39e7-4768-83c0-9b59bef04241"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.788265 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2a133d28-39e7-4768-83c0-9b59bef04241" (UID: "2a133d28-39e7-4768-83c0-9b59bef04241"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.791249 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3956e460-e179-438d-be81-7af1dc5fcfe8" (UID: "3956e460-e179-438d-be81-7af1dc5fcfe8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.796355 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3956e460-e179-438d-be81-7af1dc5fcfe8" (UID: "3956e460-e179-438d-be81-7af1dc5fcfe8"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.836907 5014 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.836941 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.836951 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mzqv\" (UniqueName: \"kubernetes.io/projected/3956e460-e179-438d-be81-7af1dc5fcfe8-kube-api-access-7mzqv\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.836963 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a133d28-39e7-4768-83c0-9b59bef04241-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.836972 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.836980 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.836989 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.836997 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gr2kt\" (UniqueName: \"kubernetes.io/projected/2a133d28-39e7-4768-83c0-9b59bef04241-kube-api-access-gr2kt\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.841220 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-config" (OuterVolumeSpecName: "config") pod "3956e460-e179-438d-be81-7af1dc5fcfe8" (UID: "3956e460-e179-438d-be81-7af1dc5fcfe8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:48:55 crc kubenswrapper[5014]: I1006 21:48:55.938541 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3956e460-e179-438d-be81-7af1dc5fcfe8-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:48:56 crc kubenswrapper[5014]: I1006 21:48:56.262552 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" Oct 06 21:48:56 crc kubenswrapper[5014]: I1006 21:48:56.262531 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57f58c7cff-sh7hf" event={"ID":"3956e460-e179-438d-be81-7af1dc5fcfe8","Type":"ContainerDied","Data":"7d59fcb901e0e7587a415b46b9dcac3a54129f48dfe9bcdbdad735066b682d31"} Oct 06 21:48:56 crc kubenswrapper[5014]: I1006 21:48:56.263201 5014 scope.go:117] "RemoveContainer" containerID="9984bfb89d550dfd25e4f5866d31fdaf0ecb771bdb962caaf7cb9f1f81dcf9ea" Oct 06 21:48:56 crc kubenswrapper[5014]: I1006 21:48:56.264945 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-t5wff" event={"ID":"8069cfef-c703-4aa4-b1a4-6860fc1734db","Type":"ContainerStarted","Data":"db7cc9b68ef3b4b78479e40f31e675c8f6b9a9f82735c43dd4837eaf2ba328d2"} Oct 06 21:48:56 crc kubenswrapper[5014]: I1006 21:48:56.269599 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jc6q5" event={"ID":"2a133d28-39e7-4768-83c0-9b59bef04241","Type":"ContainerDied","Data":"58b52c3ee905a320064b8092d7c9e5e7fd85fdc658b0d5889dfa424dd651f9b6"} Oct 06 21:48:56 crc kubenswrapper[5014]: I1006 21:48:56.269668 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="58b52c3ee905a320064b8092d7c9e5e7fd85fdc658b0d5889dfa424dd651f9b6" Oct 06 21:48:56 crc kubenswrapper[5014]: I1006 21:48:56.269796 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-jc6q5" Oct 06 21:48:56 crc kubenswrapper[5014]: I1006 21:48:56.315438 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-t5wff" podStartSLOduration=1.732438723 podStartE2EDuration="6.315375857s" podCreationTimestamp="2025-10-06 21:48:50 +0000 UTC" firstStartedPulling="2025-10-06 21:48:51.028072075 +0000 UTC m=+1076.321108809" lastFinishedPulling="2025-10-06 21:48:55.611009209 +0000 UTC m=+1080.904045943" observedRunningTime="2025-10-06 21:48:56.292593034 +0000 UTC m=+1081.585629808" watchObservedRunningTime="2025-10-06 21:48:56.315375857 +0000 UTC m=+1081.608412631" Oct 06 21:48:56 crc kubenswrapper[5014]: I1006 21:48:56.318003 5014 scope.go:117] "RemoveContainer" containerID="c214ac2d814bae9c02ab2d0dae1a8a08701dce19c9581273dc223b7ee0546a88" Oct 06 21:48:56 crc kubenswrapper[5014]: I1006 21:48:56.334284 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57f58c7cff-sh7hf"] Oct 06 21:48:56 crc kubenswrapper[5014]: I1006 21:48:56.343312 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57f58c7cff-sh7hf"] Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.039806 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cf6f78f57-t2qwv"] Oct 06 21:48:57 crc kubenswrapper[5014]: E1006 21:48:57.040118 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c" containerName="mariadb-database-create" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.040129 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c" containerName="mariadb-database-create" Oct 06 21:48:57 crc kubenswrapper[5014]: E1006 21:48:57.040141 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3956e460-e179-438d-be81-7af1dc5fcfe8" containerName="dnsmasq-dns" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.040147 5014 
state_mem.go:107] "Deleted CPUSet assignment" podUID="3956e460-e179-438d-be81-7af1dc5fcfe8" containerName="dnsmasq-dns" Oct 06 21:48:57 crc kubenswrapper[5014]: E1006 21:48:57.040169 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a133d28-39e7-4768-83c0-9b59bef04241" containerName="glance-db-sync" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.040175 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a133d28-39e7-4768-83c0-9b59bef04241" containerName="glance-db-sync" Oct 06 21:48:57 crc kubenswrapper[5014]: E1006 21:48:57.040185 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86104dc0-8799-4cbd-bead-038f3358dfba" containerName="mariadb-database-create" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.040190 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="86104dc0-8799-4cbd-bead-038f3358dfba" containerName="mariadb-database-create" Oct 06 21:48:57 crc kubenswrapper[5014]: E1006 21:48:57.040204 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c" containerName="mariadb-database-create" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.040211 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c" containerName="mariadb-database-create" Oct 06 21:48:57 crc kubenswrapper[5014]: E1006 21:48:57.040226 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3956e460-e179-438d-be81-7af1dc5fcfe8" containerName="init" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.040232 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="3956e460-e179-438d-be81-7af1dc5fcfe8" containerName="init" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.040388 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="86104dc0-8799-4cbd-bead-038f3358dfba" containerName="mariadb-database-create" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.040397 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c" containerName="mariadb-database-create" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.040412 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="3956e460-e179-438d-be81-7af1dc5fcfe8" containerName="dnsmasq-dns" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.040422 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a133d28-39e7-4768-83c0-9b59bef04241" containerName="glance-db-sync" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.040434 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c" containerName="mariadb-database-create" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.046833 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.063968 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-ovsdbserver-nb\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.064018 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-dns-svc\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.064041 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-ovsdbserver-sb\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.064108 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-dns-swift-storage-0\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.064187 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9sspn\" (UniqueName: \"kubernetes.io/projected/7617aa8b-ee26-4233-94a9-12af98518454-kube-api-access-9sspn\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.064208 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-config\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.080922 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf6f78f57-t2qwv"] Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.173024 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-dns-swift-storage-0\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.173104 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9sspn\" (UniqueName: \"kubernetes.io/projected/7617aa8b-ee26-4233-94a9-12af98518454-kube-api-access-9sspn\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.173123 5014 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-config\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.173154 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-ovsdbserver-nb\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.173180 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-dns-svc\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.173194 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-ovsdbserver-sb\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.176013 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-dns-swift-storage-0\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.178495 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-ovsdbserver-nb\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.180606 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-ovsdbserver-sb\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.188288 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-config\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.188835 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-dns-svc\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.210344 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9sspn\" (UniqueName: 
\"kubernetes.io/projected/7617aa8b-ee26-4233-94a9-12af98518454-kube-api-access-9sspn\") pod \"dnsmasq-dns-cf6f78f57-t2qwv\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.304699 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-b6a2-account-create-6h8mw"] Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.306145 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-b6a2-account-create-6h8mw" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.314258 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.321175 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-b6a2-account-create-6h8mw"] Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.347422 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-3af6-account-create-wt6wz"] Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.378224 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3af6-account-create-wt6wz" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.384584 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.393158 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-3af6-account-create-wt6wz"] Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.403237 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.485012 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6ttf\" (UniqueName: \"kubernetes.io/projected/5775db60-8257-4c71-ae41-2fd585c2a108-kube-api-access-c6ttf\") pod \"barbican-b6a2-account-create-6h8mw\" (UID: \"5775db60-8257-4c71-ae41-2fd585c2a108\") " pod="openstack/barbican-b6a2-account-create-6h8mw" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.485057 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jf75f\" (UniqueName: \"kubernetes.io/projected/848d175d-ed70-4686-b0bf-5f07deb65fb1-kube-api-access-jf75f\") pod \"cinder-3af6-account-create-wt6wz\" (UID: \"848d175d-ed70-4686-b0bf-5f07deb65fb1\") " pod="openstack/cinder-3af6-account-create-wt6wz" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.502942 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3956e460-e179-438d-be81-7af1dc5fcfe8" path="/var/lib/kubelet/pods/3956e460-e179-438d-be81-7af1dc5fcfe8/volumes" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.527592 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-2e7c-account-create-g9chd"] Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.533295 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-2e7c-account-create-g9chd" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.535587 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.553688 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-2e7c-account-create-g9chd"] Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.586218 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6ttf\" (UniqueName: \"kubernetes.io/projected/5775db60-8257-4c71-ae41-2fd585c2a108-kube-api-access-c6ttf\") pod \"barbican-b6a2-account-create-6h8mw\" (UID: \"5775db60-8257-4c71-ae41-2fd585c2a108\") " pod="openstack/barbican-b6a2-account-create-6h8mw" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.586268 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jf75f\" (UniqueName: \"kubernetes.io/projected/848d175d-ed70-4686-b0bf-5f07deb65fb1-kube-api-access-jf75f\") pod \"cinder-3af6-account-create-wt6wz\" (UID: \"848d175d-ed70-4686-b0bf-5f07deb65fb1\") " pod="openstack/cinder-3af6-account-create-wt6wz" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.611013 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6ttf\" (UniqueName: \"kubernetes.io/projected/5775db60-8257-4c71-ae41-2fd585c2a108-kube-api-access-c6ttf\") pod \"barbican-b6a2-account-create-6h8mw\" (UID: \"5775db60-8257-4c71-ae41-2fd585c2a108\") " pod="openstack/barbican-b6a2-account-create-6h8mw" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.626517 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jf75f\" (UniqueName: \"kubernetes.io/projected/848d175d-ed70-4686-b0bf-5f07deb65fb1-kube-api-access-jf75f\") pod \"cinder-3af6-account-create-wt6wz\" (UID: \"848d175d-ed70-4686-b0bf-5f07deb65fb1\") " pod="openstack/cinder-3af6-account-create-wt6wz" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.662276 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-b6a2-account-create-6h8mw" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.688652 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hswx2\" (UniqueName: \"kubernetes.io/projected/6b04355a-0ce8-4929-8ba6-b8aca24d4daa-kube-api-access-hswx2\") pod \"neutron-2e7c-account-create-g9chd\" (UID: \"6b04355a-0ce8-4929-8ba6-b8aca24d4daa\") " pod="openstack/neutron-2e7c-account-create-g9chd" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.714722 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-3af6-account-create-wt6wz" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.790166 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hswx2\" (UniqueName: \"kubernetes.io/projected/6b04355a-0ce8-4929-8ba6-b8aca24d4daa-kube-api-access-hswx2\") pod \"neutron-2e7c-account-create-g9chd\" (UID: \"6b04355a-0ce8-4929-8ba6-b8aca24d4daa\") " pod="openstack/neutron-2e7c-account-create-g9chd" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.813815 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hswx2\" (UniqueName: \"kubernetes.io/projected/6b04355a-0ce8-4929-8ba6-b8aca24d4daa-kube-api-access-hswx2\") pod \"neutron-2e7c-account-create-g9chd\" (UID: \"6b04355a-0ce8-4929-8ba6-b8aca24d4daa\") " pod="openstack/neutron-2e7c-account-create-g9chd" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.863825 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-2e7c-account-create-g9chd" Oct 06 21:48:57 crc kubenswrapper[5014]: I1006 21:48:57.929564 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf6f78f57-t2qwv"] Oct 06 21:48:58 crc kubenswrapper[5014]: I1006 21:48:58.171221 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-b6a2-account-create-6h8mw"] Oct 06 21:48:58 crc kubenswrapper[5014]: W1006 21:48:58.175145 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5775db60_8257_4c71_ae41_2fd585c2a108.slice/crio-1afc8e1f715bb284508eaca631aeb6c375abba0ebf9d205cae8a08fc59eb44d3 WatchSource:0}: Error finding container 1afc8e1f715bb284508eaca631aeb6c375abba0ebf9d205cae8a08fc59eb44d3: Status 404 returned error can't find the container with id 1afc8e1f715bb284508eaca631aeb6c375abba0ebf9d205cae8a08fc59eb44d3 Oct 06 21:48:58 crc kubenswrapper[5014]: I1006 21:48:58.277609 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-3af6-account-create-wt6wz"] Oct 06 21:48:58 crc kubenswrapper[5014]: I1006 21:48:58.328015 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b6a2-account-create-6h8mw" event={"ID":"5775db60-8257-4c71-ae41-2fd585c2a108","Type":"ContainerStarted","Data":"1afc8e1f715bb284508eaca631aeb6c375abba0ebf9d205cae8a08fc59eb44d3"} Oct 06 21:48:58 crc kubenswrapper[5014]: I1006 21:48:58.329849 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" event={"ID":"7617aa8b-ee26-4233-94a9-12af98518454","Type":"ContainerStarted","Data":"3f970561d8e2b43e833801f061a4dc1c9c9749f9731af982a6d16728861087cd"} Oct 06 21:48:58 crc kubenswrapper[5014]: I1006 21:48:58.331466 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3af6-account-create-wt6wz" event={"ID":"848d175d-ed70-4686-b0bf-5f07deb65fb1","Type":"ContainerStarted","Data":"d9e3337c3583d6c8b45f5ef52b40bf806e58b3ca0a05aa30f7d05878c060f780"} Oct 06 21:48:58 crc kubenswrapper[5014]: I1006 21:48:58.362900 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-2e7c-account-create-g9chd"] Oct 06 21:48:59 crc kubenswrapper[5014]: I1006 21:48:59.342147 5014 generic.go:334] "Generic (PLEG): container finished" podID="7617aa8b-ee26-4233-94a9-12af98518454" containerID="0cf27772199ef6a3e669ce4cf42de172725b9669ac7316a55a315f1d212ffb07" exitCode=0 Oct 06 21:48:59 crc kubenswrapper[5014]: I1006 
21:48:59.342236 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" event={"ID":"7617aa8b-ee26-4233-94a9-12af98518454","Type":"ContainerDied","Data":"0cf27772199ef6a3e669ce4cf42de172725b9669ac7316a55a315f1d212ffb07"} Oct 06 21:48:59 crc kubenswrapper[5014]: I1006 21:48:59.344562 5014 generic.go:334] "Generic (PLEG): container finished" podID="848d175d-ed70-4686-b0bf-5f07deb65fb1" containerID="2eaef8680c09830198d88643a8f0a39fc12acfe60502da9bd0cbfac3d647db18" exitCode=0 Oct 06 21:48:59 crc kubenswrapper[5014]: I1006 21:48:59.344714 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3af6-account-create-wt6wz" event={"ID":"848d175d-ed70-4686-b0bf-5f07deb65fb1","Type":"ContainerDied","Data":"2eaef8680c09830198d88643a8f0a39fc12acfe60502da9bd0cbfac3d647db18"} Oct 06 21:48:59 crc kubenswrapper[5014]: I1006 21:48:59.346972 5014 generic.go:334] "Generic (PLEG): container finished" podID="5775db60-8257-4c71-ae41-2fd585c2a108" containerID="eccd817ee0632125e243a540a43507cb9e89329aba9b2105894b28f4cc60d162" exitCode=0 Oct 06 21:48:59 crc kubenswrapper[5014]: I1006 21:48:59.347061 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b6a2-account-create-6h8mw" event={"ID":"5775db60-8257-4c71-ae41-2fd585c2a108","Type":"ContainerDied","Data":"eccd817ee0632125e243a540a43507cb9e89329aba9b2105894b28f4cc60d162"} Oct 06 21:48:59 crc kubenswrapper[5014]: I1006 21:48:59.349117 5014 generic.go:334] "Generic (PLEG): container finished" podID="6b04355a-0ce8-4929-8ba6-b8aca24d4daa" containerID="f692dfcc2492eae5404168857917e870a27a027006ac51db96b1400629f12e89" exitCode=0 Oct 06 21:48:59 crc kubenswrapper[5014]: I1006 21:48:59.349179 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-2e7c-account-create-g9chd" event={"ID":"6b04355a-0ce8-4929-8ba6-b8aca24d4daa","Type":"ContainerDied","Data":"f692dfcc2492eae5404168857917e870a27a027006ac51db96b1400629f12e89"} Oct 06 21:48:59 crc kubenswrapper[5014]: I1006 21:48:59.349234 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-2e7c-account-create-g9chd" event={"ID":"6b04355a-0ce8-4929-8ba6-b8aca24d4daa","Type":"ContainerStarted","Data":"ef39e8483c66562396b4af387bee823a91de4e6565827cb8102106a2185145b6"} Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.365364 5014 generic.go:334] "Generic (PLEG): container finished" podID="8069cfef-c703-4aa4-b1a4-6860fc1734db" containerID="db7cc9b68ef3b4b78479e40f31e675c8f6b9a9f82735c43dd4837eaf2ba328d2" exitCode=0 Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.365421 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-t5wff" event={"ID":"8069cfef-c703-4aa4-b1a4-6860fc1734db","Type":"ContainerDied","Data":"db7cc9b68ef3b4b78479e40f31e675c8f6b9a9f82735c43dd4837eaf2ba328d2"} Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.370829 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" event={"ID":"7617aa8b-ee26-4233-94a9-12af98518454","Type":"ContainerStarted","Data":"37c938bf58f77b8f6b4022e7751be1301fdd981d4299fb4a47514e6b5f79dfcc"} Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.370858 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.418752 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" 
podStartSLOduration=3.418722417 podStartE2EDuration="3.418722417s" podCreationTimestamp="2025-10-06 21:48:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:00.403415111 +0000 UTC m=+1085.696451865" watchObservedRunningTime="2025-10-06 21:49:00.418722417 +0000 UTC m=+1085.711759181" Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.825440 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3af6-account-create-wt6wz" Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.847767 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-2e7c-account-create-g9chd" Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.849985 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hswx2\" (UniqueName: \"kubernetes.io/projected/6b04355a-0ce8-4929-8ba6-b8aca24d4daa-kube-api-access-hswx2\") pod \"6b04355a-0ce8-4929-8ba6-b8aca24d4daa\" (UID: \"6b04355a-0ce8-4929-8ba6-b8aca24d4daa\") " Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.850099 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jf75f\" (UniqueName: \"kubernetes.io/projected/848d175d-ed70-4686-b0bf-5f07deb65fb1-kube-api-access-jf75f\") pod \"848d175d-ed70-4686-b0bf-5f07deb65fb1\" (UID: \"848d175d-ed70-4686-b0bf-5f07deb65fb1\") " Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.857708 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/848d175d-ed70-4686-b0bf-5f07deb65fb1-kube-api-access-jf75f" (OuterVolumeSpecName: "kube-api-access-jf75f") pod "848d175d-ed70-4686-b0bf-5f07deb65fb1" (UID: "848d175d-ed70-4686-b0bf-5f07deb65fb1"). InnerVolumeSpecName "kube-api-access-jf75f". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.869602 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b04355a-0ce8-4929-8ba6-b8aca24d4daa-kube-api-access-hswx2" (OuterVolumeSpecName: "kube-api-access-hswx2") pod "6b04355a-0ce8-4929-8ba6-b8aca24d4daa" (UID: "6b04355a-0ce8-4929-8ba6-b8aca24d4daa"). InnerVolumeSpecName "kube-api-access-hswx2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.874132 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-b6a2-account-create-6h8mw" Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.951914 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6ttf\" (UniqueName: \"kubernetes.io/projected/5775db60-8257-4c71-ae41-2fd585c2a108-kube-api-access-c6ttf\") pod \"5775db60-8257-4c71-ae41-2fd585c2a108\" (UID: \"5775db60-8257-4c71-ae41-2fd585c2a108\") " Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.952560 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jf75f\" (UniqueName: \"kubernetes.io/projected/848d175d-ed70-4686-b0bf-5f07deb65fb1-kube-api-access-jf75f\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.952587 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hswx2\" (UniqueName: \"kubernetes.io/projected/6b04355a-0ce8-4929-8ba6-b8aca24d4daa-kube-api-access-hswx2\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:00 crc kubenswrapper[5014]: I1006 21:49:00.955273 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5775db60-8257-4c71-ae41-2fd585c2a108-kube-api-access-c6ttf" (OuterVolumeSpecName: "kube-api-access-c6ttf") pod "5775db60-8257-4c71-ae41-2fd585c2a108" (UID: "5775db60-8257-4c71-ae41-2fd585c2a108"). InnerVolumeSpecName "kube-api-access-c6ttf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.054965 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6ttf\" (UniqueName: \"kubernetes.io/projected/5775db60-8257-4c71-ae41-2fd585c2a108-kube-api-access-c6ttf\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.382934 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3af6-account-create-wt6wz" Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.382931 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3af6-account-create-wt6wz" event={"ID":"848d175d-ed70-4686-b0bf-5f07deb65fb1","Type":"ContainerDied","Data":"d9e3337c3583d6c8b45f5ef52b40bf806e58b3ca0a05aa30f7d05878c060f780"} Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.383100 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d9e3337c3583d6c8b45f5ef52b40bf806e58b3ca0a05aa30f7d05878c060f780" Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.385224 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-b6a2-account-create-6h8mw" Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.385230 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b6a2-account-create-6h8mw" event={"ID":"5775db60-8257-4c71-ae41-2fd585c2a108","Type":"ContainerDied","Data":"1afc8e1f715bb284508eaca631aeb6c375abba0ebf9d205cae8a08fc59eb44d3"} Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.385471 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1afc8e1f715bb284508eaca631aeb6c375abba0ebf9d205cae8a08fc59eb44d3" Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.387724 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-2e7c-account-create-g9chd" event={"ID":"6b04355a-0ce8-4929-8ba6-b8aca24d4daa","Type":"ContainerDied","Data":"ef39e8483c66562396b4af387bee823a91de4e6565827cb8102106a2185145b6"} Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.387792 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef39e8483c66562396b4af387bee823a91de4e6565827cb8102106a2185145b6" Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.387843 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-2e7c-account-create-g9chd" Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.783762 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-t5wff" Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.869038 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8069cfef-c703-4aa4-b1a4-6860fc1734db-config-data\") pod \"8069cfef-c703-4aa4-b1a4-6860fc1734db\" (UID: \"8069cfef-c703-4aa4-b1a4-6860fc1734db\") " Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.870676 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qmlc\" (UniqueName: \"kubernetes.io/projected/8069cfef-c703-4aa4-b1a4-6860fc1734db-kube-api-access-2qmlc\") pod \"8069cfef-c703-4aa4-b1a4-6860fc1734db\" (UID: \"8069cfef-c703-4aa4-b1a4-6860fc1734db\") " Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.870755 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8069cfef-c703-4aa4-b1a4-6860fc1734db-combined-ca-bundle\") pod \"8069cfef-c703-4aa4-b1a4-6860fc1734db\" (UID: \"8069cfef-c703-4aa4-b1a4-6860fc1734db\") " Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.876674 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8069cfef-c703-4aa4-b1a4-6860fc1734db-kube-api-access-2qmlc" (OuterVolumeSpecName: "kube-api-access-2qmlc") pod "8069cfef-c703-4aa4-b1a4-6860fc1734db" (UID: "8069cfef-c703-4aa4-b1a4-6860fc1734db"). InnerVolumeSpecName "kube-api-access-2qmlc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.914517 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8069cfef-c703-4aa4-b1a4-6860fc1734db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8069cfef-c703-4aa4-b1a4-6860fc1734db" (UID: "8069cfef-c703-4aa4-b1a4-6860fc1734db"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.933128 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8069cfef-c703-4aa4-b1a4-6860fc1734db-config-data" (OuterVolumeSpecName: "config-data") pod "8069cfef-c703-4aa4-b1a4-6860fc1734db" (UID: "8069cfef-c703-4aa4-b1a4-6860fc1734db"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.973386 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qmlc\" (UniqueName: \"kubernetes.io/projected/8069cfef-c703-4aa4-b1a4-6860fc1734db-kube-api-access-2qmlc\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.973481 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8069cfef-c703-4aa4-b1a4-6860fc1734db-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:01 crc kubenswrapper[5014]: I1006 21:49:01.973555 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8069cfef-c703-4aa4-b1a4-6860fc1734db-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.399868 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-t5wff" event={"ID":"8069cfef-c703-4aa4-b1a4-6860fc1734db","Type":"ContainerDied","Data":"6d81ae290b18fc7bebb9de5bf3ba76648f45182e6982ec514d194afd56117990"} Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.399911 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d81ae290b18fc7bebb9de5bf3ba76648f45182e6982ec514d194afd56117990" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.399951 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-t5wff" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.610214 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-wmc4z"] Oct 06 21:49:02 crc kubenswrapper[5014]: E1006 21:49:02.610654 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8069cfef-c703-4aa4-b1a4-6860fc1734db" containerName="keystone-db-sync" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.610670 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8069cfef-c703-4aa4-b1a4-6860fc1734db" containerName="keystone-db-sync" Oct 06 21:49:02 crc kubenswrapper[5014]: E1006 21:49:02.610677 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5775db60-8257-4c71-ae41-2fd585c2a108" containerName="mariadb-account-create" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.610684 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5775db60-8257-4c71-ae41-2fd585c2a108" containerName="mariadb-account-create" Oct 06 21:49:02 crc kubenswrapper[5014]: E1006 21:49:02.610698 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="848d175d-ed70-4686-b0bf-5f07deb65fb1" containerName="mariadb-account-create" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.610708 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="848d175d-ed70-4686-b0bf-5f07deb65fb1" containerName="mariadb-account-create" Oct 06 21:49:02 crc kubenswrapper[5014]: E1006 21:49:02.610719 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b04355a-0ce8-4929-8ba6-b8aca24d4daa" containerName="mariadb-account-create" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.610726 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b04355a-0ce8-4929-8ba6-b8aca24d4daa" containerName="mariadb-account-create" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.610878 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="8069cfef-c703-4aa4-b1a4-6860fc1734db" containerName="keystone-db-sync" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.610893 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="848d175d-ed70-4686-b0bf-5f07deb65fb1" containerName="mariadb-account-create" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.610903 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5775db60-8257-4c71-ae41-2fd585c2a108" containerName="mariadb-account-create" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.610913 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b04355a-0ce8-4929-8ba6-b8aca24d4daa" containerName="mariadb-account-create" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.611448 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.614977 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.615128 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.615257 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-xch9b" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.618469 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-wmc4z"] Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.626258 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.633383 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf6f78f57-t2qwv"] Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.633594 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" podUID="7617aa8b-ee26-4233-94a9-12af98518454" containerName="dnsmasq-dns" containerID="cri-o://37c938bf58f77b8f6b4022e7751be1301fdd981d4299fb4a47514e6b5f79dfcc" gracePeriod=10 Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.686753 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-combined-ca-bundle\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.686817 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-config-data\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.686841 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-scripts\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.686858 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-fernet-keys\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.686889 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-credential-keys\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.686919 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-67sjm\" (UniqueName: \"kubernetes.io/projected/cdebd302-11d5-44be-9893-f9f408c8a7d0-kube-api-access-67sjm\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.739368 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8d44b7457-xcmp6"] Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.740666 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.772932 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8d44b7457-xcmp6"] Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.794975 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-dns-svc\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.795034 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-ovsdbserver-nb\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.795063 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-combined-ca-bundle\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.795122 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-config-data\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.795146 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-scripts\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.795164 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-config\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.795185 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dzld\" (UniqueName: \"kubernetes.io/projected/6a646f3e-89a6-4157-9b41-eefd507dd76e-kube-api-access-9dzld\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.795201 5014 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-fernet-keys\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.795233 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-dns-swift-storage-0\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.795252 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-credential-keys\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.795275 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-ovsdbserver-sb\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.795304 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67sjm\" (UniqueName: \"kubernetes.io/projected/cdebd302-11d5-44be-9893-f9f408c8a7d0-kube-api-access-67sjm\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.805900 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-fernet-keys\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.813397 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-config-data\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.814791 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-scripts\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.824269 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-combined-ca-bundle\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.826249 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-credential-keys\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.843053 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67sjm\" (UniqueName: \"kubernetes.io/projected/cdebd302-11d5-44be-9893-f9f408c8a7d0-kube-api-access-67sjm\") pod \"keystone-bootstrap-wmc4z\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.855732 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-pwpps"] Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.856924 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-pwpps" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.860924 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.861134 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-vt98g" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.861317 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.870479 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.872485 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.875012 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.875161 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.888429 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-79rxq"] Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.909866 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.917307 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.922728 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-8h4pd" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.924336 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-dns-svc\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.924438 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-ovsdbserver-nb\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.924557 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-config\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.927161 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.927922 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-config\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.928103 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dzld\" (UniqueName: \"kubernetes.io/projected/6a646f3e-89a6-4157-9b41-eefd507dd76e-kube-api-access-9dzld\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.928170 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-dns-swift-storage-0\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.928211 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-ovsdbserver-sb\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.928489 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-dns-svc\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: 
\"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.928893 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-ovsdbserver-sb\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.929998 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-dns-swift-storage-0\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.932111 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-ovsdbserver-nb\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.958821 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.978301 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-pwpps"] Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.984636 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dzld\" (UniqueName: \"kubernetes.io/projected/6a646f3e-89a6-4157-9b41-eefd507dd76e-kube-api-access-9dzld\") pod \"dnsmasq-dns-8d44b7457-xcmp6\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:02 crc kubenswrapper[5014]: I1006 21:49:02.997017 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032120 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwzdw\" (UniqueName: \"kubernetes.io/projected/b198c4e1-6133-4729-b58a-c83946d45a5d-kube-api-access-wwzdw\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032168 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-db-sync-config-data\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032199 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-combined-ca-bundle\") pod \"neutron-db-sync-pwpps\" (UID: \"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6\") " pod="openstack/neutron-db-sync-pwpps" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032219 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-config\") pod \"neutron-db-sync-pwpps\" (UID: \"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6\") " pod="openstack/neutron-db-sync-pwpps" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032238 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-scripts\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032254 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vt76s\" (UniqueName: \"kubernetes.io/projected/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-kube-api-access-vt76s\") pod \"neutron-db-sync-pwpps\" (UID: \"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6\") " pod="openstack/neutron-db-sync-pwpps" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032284 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032306 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-combined-ca-bundle\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032337 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8a58cf95-3f4c-4369-acbf-117df5f667be-log-httpd\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032352 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8a58cf95-3f4c-4369-acbf-117df5f667be-run-httpd\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032375 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzhx4\" (UniqueName: \"kubernetes.io/projected/8a58cf95-3f4c-4369-acbf-117df5f667be-kube-api-access-lzhx4\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032394 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032411 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-config-data\") pod \"cinder-db-sync-79rxq\" (UID: 
\"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032426 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b198c4e1-6133-4729-b58a-c83946d45a5d-etc-machine-id\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032460 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-scripts\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.032480 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-config-data\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.042447 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-79rxq"] Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.068835 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-rf5xr"] Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.071189 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.076661 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.076720 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-sf5kp" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.077210 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.094507 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-rf5xr"] Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.117271 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-62g9k"] Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.118774 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-62g9k" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.124998 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8d44b7457-xcmp6"] Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.125783 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.128037 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.132096 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-2jlk6" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.134803 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-config\") pod \"neutron-db-sync-pwpps\" (UID: \"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6\") " pod="openstack/neutron-db-sync-pwpps" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.134845 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-scripts\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.134872 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vt76s\" (UniqueName: \"kubernetes.io/projected/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-kube-api-access-vt76s\") pod \"neutron-db-sync-pwpps\" (UID: \"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6\") " pod="openstack/neutron-db-sync-pwpps" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.134901 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.134919 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-combined-ca-bundle\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.134964 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-combined-ca-bundle\") pod \"placement-db-sync-rf5xr\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") " pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.134989 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8a58cf95-3f4c-4369-acbf-117df5f667be-log-httpd\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135004 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-config-data\") pod \"placement-db-sync-rf5xr\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") " pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135018 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/8a58cf95-3f4c-4369-acbf-117df5f667be-run-httpd\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135036 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-798m2\" (UniqueName: \"kubernetes.io/projected/b4ee6065-52e4-434d-a944-a56539092b3b-kube-api-access-798m2\") pod \"barbican-db-sync-62g9k\" (UID: \"b4ee6065-52e4-434d-a944-a56539092b3b\") " pod="openstack/barbican-db-sync-62g9k" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135056 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzhx4\" (UniqueName: \"kubernetes.io/projected/8a58cf95-3f4c-4369-acbf-117df5f667be-kube-api-access-lzhx4\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135082 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135113 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b198c4e1-6133-4729-b58a-c83946d45a5d-etc-machine-id\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135129 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-config-data\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135159 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-logs\") pod \"placement-db-sync-rf5xr\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") " pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135176 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-scripts\") pod \"placement-db-sync-rf5xr\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") " pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135197 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-scripts\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135215 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-config-data\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: 
I1006 21:49:03.135235 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b4ee6065-52e4-434d-a944-a56539092b3b-db-sync-config-data\") pod \"barbican-db-sync-62g9k\" (UID: \"b4ee6065-52e4-434d-a944-a56539092b3b\") " pod="openstack/barbican-db-sync-62g9k" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135250 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ee6065-52e4-434d-a944-a56539092b3b-combined-ca-bundle\") pod \"barbican-db-sync-62g9k\" (UID: \"b4ee6065-52e4-434d-a944-a56539092b3b\") " pod="openstack/barbican-db-sync-62g9k" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135273 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4blm\" (UniqueName: \"kubernetes.io/projected/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-kube-api-access-x4blm\") pod \"placement-db-sync-rf5xr\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") " pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135290 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwzdw\" (UniqueName: \"kubernetes.io/projected/b198c4e1-6133-4729-b58a-c83946d45a5d-kube-api-access-wwzdw\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135306 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-db-sync-config-data\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.135342 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-combined-ca-bundle\") pod \"neutron-db-sync-pwpps\" (UID: \"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6\") " pod="openstack/neutron-db-sync-pwpps" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.144180 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-combined-ca-bundle\") pod \"neutron-db-sync-pwpps\" (UID: \"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6\") " pod="openstack/neutron-db-sync-pwpps" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.145959 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8a58cf95-3f4c-4369-acbf-117df5f667be-log-httpd\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.146386 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8a58cf95-3f4c-4369-acbf-117df5f667be-run-httpd\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.147783 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/b198c4e1-6133-4729-b58a-c83946d45a5d-etc-machine-id\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.148273 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-scripts\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.148855 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-config\") pod \"neutron-db-sync-pwpps\" (UID: \"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6\") " pod="openstack/neutron-db-sync-pwpps" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.153987 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-62g9k"] Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.155215 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.156015 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-config-data\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.164908 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-combined-ca-bundle\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.165662 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-scripts\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.170306 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.172162 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-config-data\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.172183 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-db-sync-config-data\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc 
kubenswrapper[5014]: I1006 21:49:03.178063 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vt76s\" (UniqueName: \"kubernetes.io/projected/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-kube-api-access-vt76s\") pod \"neutron-db-sync-pwpps\" (UID: \"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6\") " pod="openstack/neutron-db-sync-pwpps" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.179145 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77dd5cf987-sjkjh"] Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.180379 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwzdw\" (UniqueName: \"kubernetes.io/projected/b198c4e1-6133-4729-b58a-c83946d45a5d-kube-api-access-wwzdw\") pod \"cinder-db-sync-79rxq\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.181143 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.184502 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzhx4\" (UniqueName: \"kubernetes.io/projected/8a58cf95-3f4c-4369-acbf-117df5f667be-kube-api-access-lzhx4\") pod \"ceilometer-0\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.209770 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77dd5cf987-sjkjh"] Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.237469 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-logs\") pod \"placement-db-sync-rf5xr\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") " pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.237533 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-dns-swift-storage-0\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.237564 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-scripts\") pod \"placement-db-sync-rf5xr\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") " pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.237628 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b4ee6065-52e4-434d-a944-a56539092b3b-db-sync-config-data\") pod \"barbican-db-sync-62g9k\" (UID: \"b4ee6065-52e4-434d-a944-a56539092b3b\") " pod="openstack/barbican-db-sync-62g9k" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.237653 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ee6065-52e4-434d-a944-a56539092b3b-combined-ca-bundle\") pod \"barbican-db-sync-62g9k\" (UID: \"b4ee6065-52e4-434d-a944-a56539092b3b\") " pod="openstack/barbican-db-sync-62g9k" Oct 06 21:49:03 crc 
kubenswrapper[5014]: I1006 21:49:03.237683 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4blm\" (UniqueName: \"kubernetes.io/projected/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-kube-api-access-x4blm\") pod \"placement-db-sync-rf5xr\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") " pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.237745 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwbq7\" (UniqueName: \"kubernetes.io/projected/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-kube-api-access-nwbq7\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.237800 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-ovsdbserver-sb\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.239010 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-logs\") pod \"placement-db-sync-rf5xr\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") " pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.239131 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-config\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.239268 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-dns-svc\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.239335 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-combined-ca-bundle\") pod \"placement-db-sync-rf5xr\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") " pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.239410 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-config-data\") pod \"placement-db-sync-rf5xr\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") " pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.239588 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-798m2\" (UniqueName: \"kubernetes.io/projected/b4ee6065-52e4-434d-a944-a56539092b3b-kube-api-access-798m2\") pod \"barbican-db-sync-62g9k\" (UID: \"b4ee6065-52e4-434d-a944-a56539092b3b\") " pod="openstack/barbican-db-sync-62g9k" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.239749 5014 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-ovsdbserver-nb\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.242655 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-scripts\") pod \"placement-db-sync-rf5xr\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") " pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.244344 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b4ee6065-52e4-434d-a944-a56539092b3b-db-sync-config-data\") pod \"barbican-db-sync-62g9k\" (UID: \"b4ee6065-52e4-434d-a944-a56539092b3b\") " pod="openstack/barbican-db-sync-62g9k" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.246751 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-combined-ca-bundle\") pod \"placement-db-sync-rf5xr\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") " pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.249150 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-config-data\") pod \"placement-db-sync-rf5xr\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") " pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.251541 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4blm\" (UniqueName: \"kubernetes.io/projected/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-kube-api-access-x4blm\") pod \"placement-db-sync-rf5xr\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") " pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.256150 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ee6065-52e4-434d-a944-a56539092b3b-combined-ca-bundle\") pod \"barbican-db-sync-62g9k\" (UID: \"b4ee6065-52e4-434d-a944-a56539092b3b\") " pod="openstack/barbican-db-sync-62g9k" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.256212 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-798m2\" (UniqueName: \"kubernetes.io/projected/b4ee6065-52e4-434d-a944-a56539092b3b-kube-api-access-798m2\") pod \"barbican-db-sync-62g9k\" (UID: \"b4ee6065-52e4-434d-a944-a56539092b3b\") " pod="openstack/barbican-db-sync-62g9k" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.257487 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.342762 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-ovsdbserver-sb\") pod \"7617aa8b-ee26-4233-94a9-12af98518454\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.342804 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-config\") pod \"7617aa8b-ee26-4233-94a9-12af98518454\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.342849 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-dns-svc\") pod \"7617aa8b-ee26-4233-94a9-12af98518454\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.342887 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9sspn\" (UniqueName: \"kubernetes.io/projected/7617aa8b-ee26-4233-94a9-12af98518454-kube-api-access-9sspn\") pod \"7617aa8b-ee26-4233-94a9-12af98518454\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.342917 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-ovsdbserver-nb\") pod \"7617aa8b-ee26-4233-94a9-12af98518454\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.343019 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-dns-swift-storage-0\") pod \"7617aa8b-ee26-4233-94a9-12af98518454\" (UID: \"7617aa8b-ee26-4233-94a9-12af98518454\") " Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.343281 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-ovsdbserver-nb\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.343321 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-dns-swift-storage-0\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.343395 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwbq7\" (UniqueName: \"kubernetes.io/projected/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-kube-api-access-nwbq7\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.343429 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-ovsdbserver-sb\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.343455 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-config\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.343481 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-dns-svc\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.344280 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-dns-svc\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.345601 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-ovsdbserver-nb\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.347158 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-dns-swift-storage-0\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.348246 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-ovsdbserver-sb\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.353313 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-config\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.357928 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-pwpps" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.359683 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7617aa8b-ee26-4233-94a9-12af98518454-kube-api-access-9sspn" (OuterVolumeSpecName: "kube-api-access-9sspn") pod "7617aa8b-ee26-4233-94a9-12af98518454" (UID: "7617aa8b-ee26-4233-94a9-12af98518454"). InnerVolumeSpecName "kube-api-access-9sspn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.360360 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.375674 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.391047 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwbq7\" (UniqueName: \"kubernetes.io/projected/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-kube-api-access-nwbq7\") pod \"dnsmasq-dns-77dd5cf987-sjkjh\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.394250 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-rf5xr" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.400541 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7617aa8b-ee26-4233-94a9-12af98518454" (UID: "7617aa8b-ee26-4233-94a9-12af98518454"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.404646 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7617aa8b-ee26-4233-94a9-12af98518454" (UID: "7617aa8b-ee26-4233-94a9-12af98518454"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.424083 5014 generic.go:334] "Generic (PLEG): container finished" podID="7617aa8b-ee26-4233-94a9-12af98518454" containerID="37c938bf58f77b8f6b4022e7751be1301fdd981d4299fb4a47514e6b5f79dfcc" exitCode=0 Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.424124 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" event={"ID":"7617aa8b-ee26-4233-94a9-12af98518454","Type":"ContainerDied","Data":"37c938bf58f77b8f6b4022e7751be1301fdd981d4299fb4a47514e6b5f79dfcc"} Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.424149 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" event={"ID":"7617aa8b-ee26-4233-94a9-12af98518454","Type":"ContainerDied","Data":"3f970561d8e2b43e833801f061a4dc1c9c9749f9731af982a6d16728861087cd"} Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.424166 5014 scope.go:117] "RemoveContainer" containerID="37c938bf58f77b8f6b4022e7751be1301fdd981d4299fb4a47514e6b5f79dfcc" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.424279 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf6f78f57-t2qwv" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.444112 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7617aa8b-ee26-4233-94a9-12af98518454" (UID: "7617aa8b-ee26-4233-94a9-12af98518454"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.444868 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.444900 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9sspn\" (UniqueName: \"kubernetes.io/projected/7617aa8b-ee26-4233-94a9-12af98518454-kube-api-access-9sspn\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.444909 5014 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.444918 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.445027 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-62g9k" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.447461 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-config" (OuterVolumeSpecName: "config") pod "7617aa8b-ee26-4233-94a9-12af98518454" (UID: "7617aa8b-ee26-4233-94a9-12af98518454"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.448989 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7617aa8b-ee26-4233-94a9-12af98518454" (UID: "7617aa8b-ee26-4233-94a9-12af98518454"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.499423 5014 scope.go:117] "RemoveContainer" containerID="0cf27772199ef6a3e669ce4cf42de172725b9669ac7316a55a315f1d212ffb07" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.505864 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.529235 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-wmc4z"] Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.558887 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.558919 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7617aa8b-ee26-4233-94a9-12af98518454-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.580806 5014 scope.go:117] "RemoveContainer" containerID="37c938bf58f77b8f6b4022e7751be1301fdd981d4299fb4a47514e6b5f79dfcc" Oct 06 21:49:03 crc kubenswrapper[5014]: E1006 21:49:03.582889 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37c938bf58f77b8f6b4022e7751be1301fdd981d4299fb4a47514e6b5f79dfcc\": container with ID starting with 37c938bf58f77b8f6b4022e7751be1301fdd981d4299fb4a47514e6b5f79dfcc not found: ID does not exist" containerID="37c938bf58f77b8f6b4022e7751be1301fdd981d4299fb4a47514e6b5f79dfcc" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.582940 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37c938bf58f77b8f6b4022e7751be1301fdd981d4299fb4a47514e6b5f79dfcc"} err="failed to get container status \"37c938bf58f77b8f6b4022e7751be1301fdd981d4299fb4a47514e6b5f79dfcc\": rpc error: code = NotFound desc = could not find container \"37c938bf58f77b8f6b4022e7751be1301fdd981d4299fb4a47514e6b5f79dfcc\": container with ID starting with 37c938bf58f77b8f6b4022e7751be1301fdd981d4299fb4a47514e6b5f79dfcc not found: ID does not exist" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.583023 5014 scope.go:117] "RemoveContainer" containerID="0cf27772199ef6a3e669ce4cf42de172725b9669ac7316a55a315f1d212ffb07" Oct 06 21:49:03 crc kubenswrapper[5014]: E1006 21:49:03.585156 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0cf27772199ef6a3e669ce4cf42de172725b9669ac7316a55a315f1d212ffb07\": container with ID starting with 0cf27772199ef6a3e669ce4cf42de172725b9669ac7316a55a315f1d212ffb07 not found: ID does not exist" containerID="0cf27772199ef6a3e669ce4cf42de172725b9669ac7316a55a315f1d212ffb07" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.585554 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0cf27772199ef6a3e669ce4cf42de172725b9669ac7316a55a315f1d212ffb07"} err="failed to get container status \"0cf27772199ef6a3e669ce4cf42de172725b9669ac7316a55a315f1d212ffb07\": rpc error: code = NotFound desc = could not find container \"0cf27772199ef6a3e669ce4cf42de172725b9669ac7316a55a315f1d212ffb07\": container with ID starting with 0cf27772199ef6a3e669ce4cf42de172725b9669ac7316a55a315f1d212ffb07 not found: ID does not exist" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.664732 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8d44b7457-xcmp6"] Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.704107 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Oct 
06 21:49:03 crc kubenswrapper[5014]: E1006 21:49:03.704496 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7617aa8b-ee26-4233-94a9-12af98518454" containerName="dnsmasq-dns" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.704512 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="7617aa8b-ee26-4233-94a9-12af98518454" containerName="dnsmasq-dns" Oct 06 21:49:03 crc kubenswrapper[5014]: E1006 21:49:03.704539 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7617aa8b-ee26-4233-94a9-12af98518454" containerName="init" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.704546 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="7617aa8b-ee26-4233-94a9-12af98518454" containerName="init" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.704719 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="7617aa8b-ee26-4233-94a9-12af98518454" containerName="dnsmasq-dns" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.705578 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.707355 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-wr8xs" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.709976 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.710130 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.710283 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.742757 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.763906 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-scripts\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.764435 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zx88\" (UniqueName: \"kubernetes.io/projected/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-kube-api-access-6zx88\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.764486 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-logs\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.764518 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: 
\"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.764541 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.766232 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.766319 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.767146 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-config-data\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.854967 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-pwpps"] Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.870311 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-scripts\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.870519 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zx88\" (UniqueName: \"kubernetes.io/projected/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-kube-api-access-6zx88\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.870632 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-logs\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.870737 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.870801 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.870864 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.870932 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.871019 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-config-data\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.871948 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.873430 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.874080 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.874404 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-logs\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.875242 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.875780 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.875979 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.880264 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-config-data\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.884730 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.885846 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.886968 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-scripts\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.895534 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.915997 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zx88\" (UniqueName: \"kubernetes.io/projected/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-kube-api-access-6zx88\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.917090 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " 
pod="openstack/glance-default-external-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.973184 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.973247 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-logs\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.973300 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrtsw\" (UniqueName: \"kubernetes.io/projected/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-kube-api-access-mrtsw\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.973333 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.973351 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.973373 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.973399 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:03 crc kubenswrapper[5014]: I1006 21:49:03.973436 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.007229 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.075316 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.075376 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.075466 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.075529 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-logs\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.075589 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrtsw\" (UniqueName: \"kubernetes.io/projected/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-kube-api-access-mrtsw\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.080520 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.085107 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.085197 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.085291 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 
06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.085751 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.086314 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-logs\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.123707 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf6f78f57-t2qwv"] Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.140142 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cf6f78f57-t2qwv"] Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.152249 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-62g9k"] Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.186669 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.221406 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-79rxq"] Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.226926 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-rf5xr"] Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.237847 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.272986 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.273000 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.274908 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrtsw\" (UniqueName: \"kubernetes.io/projected/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-kube-api-access-mrtsw\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.275534 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " 
pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.287474 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77dd5cf987-sjkjh"] Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.349919 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.448875 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8a58cf95-3f4c-4369-acbf-117df5f667be","Type":"ContainerStarted","Data":"8a9f99650810fcd3755a788e5103334008d31df66fe0d09658a43767f2c3c0d7"} Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.456025 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" event={"ID":"6a646f3e-89a6-4157-9b41-eefd507dd76e","Type":"ContainerStarted","Data":"96686873d2858802eda517c8e7b2427c820ebe92b65ffee1f4fb0d23ed237ccb"} Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.456256 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" event={"ID":"6a646f3e-89a6-4157-9b41-eefd507dd76e","Type":"ContainerStarted","Data":"958be0e0f4a332a3539080715994e0b9b00938a81b4ce28ff5f20087db06ec2f"} Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.462681 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" event={"ID":"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e","Type":"ContainerStarted","Data":"b0afb201ed5bceee9d10313a5830d8a20e46e87c896313b9f30c5d23a835591c"} Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.465782 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-wmc4z" event={"ID":"cdebd302-11d5-44be-9893-f9f408c8a7d0","Type":"ContainerStarted","Data":"17ada03c06ef0ed42fb4509093ae61bf8a5112d17d914c8bebab906bb79f4014"} Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.465814 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-wmc4z" event={"ID":"cdebd302-11d5-44be-9893-f9f408c8a7d0","Type":"ContainerStarted","Data":"e010c8895359dcc0073c1792d1557d4444000b6b1e9d399bf3fd175cfaa42f9a"} Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.486528 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-rf5xr" event={"ID":"6386c486-dff3-4e2a-8312-d14c0b3ba0a5","Type":"ContainerStarted","Data":"2bdca24e063c87de3505e5a3d06ff9d2d3f38417c88eb921a053a1bd865b1df8"} Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.496587 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-62g9k" event={"ID":"b4ee6065-52e4-434d-a944-a56539092b3b","Type":"ContainerStarted","Data":"413ff7ac494343f95ff9b2a16d6e1c8a03c2f50ce61383e2644ff72503ac0bef"} Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.498107 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-79rxq" event={"ID":"b198c4e1-6133-4729-b58a-c83946d45a5d","Type":"ContainerStarted","Data":"bc157548cc65148f2aec070d4a827a713e3e84b5bdb1f654b757fe3efe9566e6"} Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.500539 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-pwpps" 
event={"ID":"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6","Type":"ContainerStarted","Data":"d027fc574ed3e1780f86823b64d54c340ae29116db94cb57a95a5b184ff616b8"} Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.500558 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-pwpps" event={"ID":"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6","Type":"ContainerStarted","Data":"60305645a65e7cb1080ea012f970c90e8f148b84776e5a899a0be5f4a0a1afe7"} Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.500814 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-wmc4z" podStartSLOduration=2.500794101 podStartE2EDuration="2.500794101s" podCreationTimestamp="2025-10-06 21:49:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:04.494138559 +0000 UTC m=+1089.787175293" watchObservedRunningTime="2025-10-06 21:49:04.500794101 +0000 UTC m=+1089.793830835" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.519910 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-pwpps" podStartSLOduration=2.519889856 podStartE2EDuration="2.519889856s" podCreationTimestamp="2025-10-06 21:49:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:04.514361011 +0000 UTC m=+1089.807397735" watchObservedRunningTime="2025-10-06 21:49:04.519889856 +0000 UTC m=+1089.812926590" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.638283 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.844223 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 06 21:49:04 crc kubenswrapper[5014]: I1006 21:49:04.855610 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.007538 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-ovsdbserver-sb\") pod \"6a646f3e-89a6-4157-9b41-eefd507dd76e\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.007674 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-dns-swift-storage-0\") pod \"6a646f3e-89a6-4157-9b41-eefd507dd76e\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.007766 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-config\") pod \"6a646f3e-89a6-4157-9b41-eefd507dd76e\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.007792 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-dns-svc\") pod \"6a646f3e-89a6-4157-9b41-eefd507dd76e\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.007820 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dzld\" (UniqueName: \"kubernetes.io/projected/6a646f3e-89a6-4157-9b41-eefd507dd76e-kube-api-access-9dzld\") pod \"6a646f3e-89a6-4157-9b41-eefd507dd76e\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.007864 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-ovsdbserver-nb\") pod \"6a646f3e-89a6-4157-9b41-eefd507dd76e\" (UID: \"6a646f3e-89a6-4157-9b41-eefd507dd76e\") " Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.041454 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-config" (OuterVolumeSpecName: "config") pod "6a646f3e-89a6-4157-9b41-eefd507dd76e" (UID: "6a646f3e-89a6-4157-9b41-eefd507dd76e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.041466 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6a646f3e-89a6-4157-9b41-eefd507dd76e" (UID: "6a646f3e-89a6-4157-9b41-eefd507dd76e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.046123 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6a646f3e-89a6-4157-9b41-eefd507dd76e" (UID: "6a646f3e-89a6-4157-9b41-eefd507dd76e"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.048829 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6a646f3e-89a6-4157-9b41-eefd507dd76e" (UID: "6a646f3e-89a6-4157-9b41-eefd507dd76e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.050134 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a646f3e-89a6-4157-9b41-eefd507dd76e-kube-api-access-9dzld" (OuterVolumeSpecName: "kube-api-access-9dzld") pod "6a646f3e-89a6-4157-9b41-eefd507dd76e" (UID: "6a646f3e-89a6-4157-9b41-eefd507dd76e"). InnerVolumeSpecName "kube-api-access-9dzld". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.053579 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6a646f3e-89a6-4157-9b41-eefd507dd76e" (UID: "6a646f3e-89a6-4157-9b41-eefd507dd76e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.111107 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.111300 5014 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.111312 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.111320 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.111330 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dzld\" (UniqueName: \"kubernetes.io/projected/6a646f3e-89a6-4157-9b41-eefd507dd76e-kube-api-access-9dzld\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.111340 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6a646f3e-89a6-4157-9b41-eefd507dd76e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.265981 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 06 21:49:05 crc kubenswrapper[5014]: W1006 21:49:05.287821 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f4181c2_debb_4a8d_a1ab_1a8d88d087c7.slice/crio-b23cafbb54020dbc74ab857644905063599b40681a6ffef9709685b064208743 WatchSource:0}: Error finding container 
b23cafbb54020dbc74ab857644905063599b40681a6ffef9709685b064208743: Status 404 returned error can't find the container with id b23cafbb54020dbc74ab857644905063599b40681a6ffef9709685b064208743 Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.387836 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.428201 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.474777 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.534328 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7617aa8b-ee26-4233-94a9-12af98518454" path="/var/lib/kubelet/pods/7617aa8b-ee26-4233-94a9-12af98518454/volumes" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.576767 5014 generic.go:334] "Generic (PLEG): container finished" podID="0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" containerID="34e130e06f1a53f66bd521eb3f46b5f179a77b35008bb9a6c23af351993228be" exitCode=0 Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.576873 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" event={"ID":"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e","Type":"ContainerDied","Data":"34e130e06f1a53f66bd521eb3f46b5f179a77b35008bb9a6c23af351993228be"} Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.589689 5014 generic.go:334] "Generic (PLEG): container finished" podID="6a646f3e-89a6-4157-9b41-eefd507dd76e" containerID="96686873d2858802eda517c8e7b2427c820ebe92b65ffee1f4fb0d23ed237ccb" exitCode=0 Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.589859 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.590816 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" event={"ID":"6a646f3e-89a6-4157-9b41-eefd507dd76e","Type":"ContainerDied","Data":"96686873d2858802eda517c8e7b2427c820ebe92b65ffee1f4fb0d23ed237ccb"} Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.590848 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8d44b7457-xcmp6" event={"ID":"6a646f3e-89a6-4157-9b41-eefd507dd76e","Type":"ContainerDied","Data":"958be0e0f4a332a3539080715994e0b9b00938a81b4ce28ff5f20087db06ec2f"} Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.590869 5014 scope.go:117] "RemoveContainer" containerID="96686873d2858802eda517c8e7b2427c820ebe92b65ffee1f4fb0d23ed237ccb" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.616634 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb","Type":"ContainerStarted","Data":"5098e302cf329e07daa489e84f923cb66c1dbd530b401d1a69f4f3b1eaf7d786"} Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.628708 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7","Type":"ContainerStarted","Data":"b23cafbb54020dbc74ab857644905063599b40681a6ffef9709685b064208743"} Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.730683 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8d44b7457-xcmp6"] Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.743871 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8d44b7457-xcmp6"] Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.880645 5014 scope.go:117] "RemoveContainer" containerID="96686873d2858802eda517c8e7b2427c820ebe92b65ffee1f4fb0d23ed237ccb" Oct 06 21:49:05 crc kubenswrapper[5014]: E1006 21:49:05.881846 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96686873d2858802eda517c8e7b2427c820ebe92b65ffee1f4fb0d23ed237ccb\": container with ID starting with 96686873d2858802eda517c8e7b2427c820ebe92b65ffee1f4fb0d23ed237ccb not found: ID does not exist" containerID="96686873d2858802eda517c8e7b2427c820ebe92b65ffee1f4fb0d23ed237ccb" Oct 06 21:49:05 crc kubenswrapper[5014]: I1006 21:49:05.881896 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96686873d2858802eda517c8e7b2427c820ebe92b65ffee1f4fb0d23ed237ccb"} err="failed to get container status \"96686873d2858802eda517c8e7b2427c820ebe92b65ffee1f4fb0d23ed237ccb\": rpc error: code = NotFound desc = could not find container \"96686873d2858802eda517c8e7b2427c820ebe92b65ffee1f4fb0d23ed237ccb\": container with ID starting with 96686873d2858802eda517c8e7b2427c820ebe92b65ffee1f4fb0d23ed237ccb not found: ID does not exist" Oct 06 21:49:06 crc kubenswrapper[5014]: I1006 21:49:06.645561 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" event={"ID":"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e","Type":"ContainerStarted","Data":"14cfbab9388c4de2eada217b75e563a766b9d49fbb94e8750e2a5fdaaac974a0"} Oct 06 21:49:06 crc kubenswrapper[5014]: I1006 21:49:06.645921 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:06 crc kubenswrapper[5014]: I1006 21:49:06.655256 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb","Type":"ContainerStarted","Data":"8805433cbecafc1e70dc7ad5f1f319d86ad7748c493900543deaf3f0efedc1c8"} Oct 06 21:49:06 crc kubenswrapper[5014]: I1006 21:49:06.658980 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7","Type":"ContainerStarted","Data":"424aadf618aa3a87294110b8fbdb2352e5697ea4a87e4efffead15575896ec54"} Oct 06 21:49:06 crc kubenswrapper[5014]: I1006 21:49:06.675366 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" podStartSLOduration=3.675350216 podStartE2EDuration="3.675350216s" podCreationTimestamp="2025-10-06 21:49:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:06.673711344 +0000 UTC m=+1091.966748078" watchObservedRunningTime="2025-10-06 21:49:06.675350216 +0000 UTC m=+1091.968386950" Oct 06 21:49:07 crc kubenswrapper[5014]: I1006 21:49:07.504312 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a646f3e-89a6-4157-9b41-eefd507dd76e" path="/var/lib/kubelet/pods/6a646f3e-89a6-4157-9b41-eefd507dd76e/volumes" Oct 06 21:49:07 crc kubenswrapper[5014]: I1006 21:49:07.671877 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7","Type":"ContainerStarted","Data":"92ed03187c2f863f58c94cfdba111b7c6b0d85d7dc930679ea3e14dfa1d61408"} Oct 06 21:49:07 crc kubenswrapper[5014]: I1006 21:49:07.673160 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" containerName="glance-log" containerID="cri-o://424aadf618aa3a87294110b8fbdb2352e5697ea4a87e4efffead15575896ec54" gracePeriod=30 Oct 06 21:49:07 crc kubenswrapper[5014]: I1006 21:49:07.673850 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" containerName="glance-httpd" containerID="cri-o://92ed03187c2f863f58c94cfdba111b7c6b0d85d7dc930679ea3e14dfa1d61408" gracePeriod=30 Oct 06 21:49:07 crc kubenswrapper[5014]: I1006 21:49:07.676800 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb","Type":"ContainerStarted","Data":"1cd76d95afc981f88a39a19c64a99d3a6493c08c25f993c5b4df9c93f656ebcd"} Oct 06 21:49:07 crc kubenswrapper[5014]: I1006 21:49:07.677388 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" containerName="glance-log" containerID="cri-o://8805433cbecafc1e70dc7ad5f1f319d86ad7748c493900543deaf3f0efedc1c8" gracePeriod=30 Oct 06 21:49:07 crc kubenswrapper[5014]: I1006 21:49:07.677538 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" containerName="glance-httpd" containerID="cri-o://1cd76d95afc981f88a39a19c64a99d3a6493c08c25f993c5b4df9c93f656ebcd" 
gracePeriod=30 Oct 06 21:49:07 crc kubenswrapper[5014]: I1006 21:49:07.716451 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.716430823 podStartE2EDuration="5.716430823s" podCreationTimestamp="2025-10-06 21:49:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:07.701395145 +0000 UTC m=+1092.994431879" watchObservedRunningTime="2025-10-06 21:49:07.716430823 +0000 UTC m=+1093.009467557" Oct 06 21:49:07 crc kubenswrapper[5014]: I1006 21:49:07.742837 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.74281417 podStartE2EDuration="5.74281417s" podCreationTimestamp="2025-10-06 21:49:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:07.73053016 +0000 UTC m=+1093.023566904" watchObservedRunningTime="2025-10-06 21:49:07.74281417 +0000 UTC m=+1093.035850904" Oct 06 21:49:08 crc kubenswrapper[5014]: I1006 21:49:08.690342 5014 generic.go:334] "Generic (PLEG): container finished" podID="4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" containerID="92ed03187c2f863f58c94cfdba111b7c6b0d85d7dc930679ea3e14dfa1d61408" exitCode=0 Oct 06 21:49:08 crc kubenswrapper[5014]: I1006 21:49:08.690376 5014 generic.go:334] "Generic (PLEG): container finished" podID="4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" containerID="424aadf618aa3a87294110b8fbdb2352e5697ea4a87e4efffead15575896ec54" exitCode=143 Oct 06 21:49:08 crc kubenswrapper[5014]: I1006 21:49:08.690424 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7","Type":"ContainerDied","Data":"92ed03187c2f863f58c94cfdba111b7c6b0d85d7dc930679ea3e14dfa1d61408"} Oct 06 21:49:08 crc kubenswrapper[5014]: I1006 21:49:08.690452 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7","Type":"ContainerDied","Data":"424aadf618aa3a87294110b8fbdb2352e5697ea4a87e4efffead15575896ec54"} Oct 06 21:49:08 crc kubenswrapper[5014]: I1006 21:49:08.692016 5014 generic.go:334] "Generic (PLEG): container finished" podID="cdebd302-11d5-44be-9893-f9f408c8a7d0" containerID="17ada03c06ef0ed42fb4509093ae61bf8a5112d17d914c8bebab906bb79f4014" exitCode=0 Oct 06 21:49:08 crc kubenswrapper[5014]: I1006 21:49:08.692064 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-wmc4z" event={"ID":"cdebd302-11d5-44be-9893-f9f408c8a7d0","Type":"ContainerDied","Data":"17ada03c06ef0ed42fb4509093ae61bf8a5112d17d914c8bebab906bb79f4014"} Oct 06 21:49:08 crc kubenswrapper[5014]: I1006 21:49:08.695198 5014 generic.go:334] "Generic (PLEG): container finished" podID="8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" containerID="1cd76d95afc981f88a39a19c64a99d3a6493c08c25f993c5b4df9c93f656ebcd" exitCode=0 Oct 06 21:49:08 crc kubenswrapper[5014]: I1006 21:49:08.695239 5014 generic.go:334] "Generic (PLEG): container finished" podID="8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" containerID="8805433cbecafc1e70dc7ad5f1f319d86ad7748c493900543deaf3f0efedc1c8" exitCode=143 Oct 06 21:49:08 crc kubenswrapper[5014]: I1006 21:49:08.695274 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb","Type":"ContainerDied","Data":"1cd76d95afc981f88a39a19c64a99d3a6493c08c25f993c5b4df9c93f656ebcd"} Oct 06 21:49:08 crc kubenswrapper[5014]: I1006 21:49:08.695325 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb","Type":"ContainerDied","Data":"8805433cbecafc1e70dc7ad5f1f319d86ad7748c493900543deaf3f0efedc1c8"} Oct 06 21:49:13 crc kubenswrapper[5014]: I1006 21:49:13.509325 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:13 crc kubenswrapper[5014]: I1006 21:49:13.575391 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59f45f6cf7-cmxts"] Oct 06 21:49:13 crc kubenswrapper[5014]: I1006 21:49:13.575649 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" podUID="0eea5313-8d2a-4066-9896-15ed6ada8e4a" containerName="dnsmasq-dns" containerID="cri-o://f048351e68f473e6b5e9a6811cebb92cb76348c3c9a8ea09e0cf146f801628ef" gracePeriod=10 Oct 06 21:49:14 crc kubenswrapper[5014]: I1006 21:49:14.765554 5014 generic.go:334] "Generic (PLEG): container finished" podID="0eea5313-8d2a-4066-9896-15ed6ada8e4a" containerID="f048351e68f473e6b5e9a6811cebb92cb76348c3c9a8ea09e0cf146f801628ef" exitCode=0 Oct 06 21:49:14 crc kubenswrapper[5014]: I1006 21:49:14.766086 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" event={"ID":"0eea5313-8d2a-4066-9896-15ed6ada8e4a","Type":"ContainerDied","Data":"f048351e68f473e6b5e9a6811cebb92cb76348c3c9a8ea09e0cf146f801628ef"} Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.122093 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.256448 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-fernet-keys\") pod \"cdebd302-11d5-44be-9893-f9f408c8a7d0\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.256519 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-combined-ca-bundle\") pod \"cdebd302-11d5-44be-9893-f9f408c8a7d0\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.256571 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-scripts\") pod \"cdebd302-11d5-44be-9893-f9f408c8a7d0\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.256609 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-credential-keys\") pod \"cdebd302-11d5-44be-9893-f9f408c8a7d0\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.256704 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67sjm\" (UniqueName: \"kubernetes.io/projected/cdebd302-11d5-44be-9893-f9f408c8a7d0-kube-api-access-67sjm\") pod \"cdebd302-11d5-44be-9893-f9f408c8a7d0\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.256803 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-config-data\") pod \"cdebd302-11d5-44be-9893-f9f408c8a7d0\" (UID: \"cdebd302-11d5-44be-9893-f9f408c8a7d0\") " Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.263273 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-scripts" (OuterVolumeSpecName: "scripts") pod "cdebd302-11d5-44be-9893-f9f408c8a7d0" (UID: "cdebd302-11d5-44be-9893-f9f408c8a7d0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.263813 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "cdebd302-11d5-44be-9893-f9f408c8a7d0" (UID: "cdebd302-11d5-44be-9893-f9f408c8a7d0"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.264298 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdebd302-11d5-44be-9893-f9f408c8a7d0-kube-api-access-67sjm" (OuterVolumeSpecName: "kube-api-access-67sjm") pod "cdebd302-11d5-44be-9893-f9f408c8a7d0" (UID: "cdebd302-11d5-44be-9893-f9f408c8a7d0"). InnerVolumeSpecName "kube-api-access-67sjm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.264522 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "cdebd302-11d5-44be-9893-f9f408c8a7d0" (UID: "cdebd302-11d5-44be-9893-f9f408c8a7d0"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.280861 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-config-data" (OuterVolumeSpecName: "config-data") pod "cdebd302-11d5-44be-9893-f9f408c8a7d0" (UID: "cdebd302-11d5-44be-9893-f9f408c8a7d0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.289079 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cdebd302-11d5-44be-9893-f9f408c8a7d0" (UID: "cdebd302-11d5-44be-9893-f9f408c8a7d0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.359750 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.359787 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.359801 5014 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-credential-keys\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.359811 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67sjm\" (UniqueName: \"kubernetes.io/projected/cdebd302-11d5-44be-9893-f9f408c8a7d0-kube-api-access-67sjm\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.359824 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.359834 5014 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cdebd302-11d5-44be-9893-f9f408c8a7d0-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.789348 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-wmc4z" event={"ID":"cdebd302-11d5-44be-9893-f9f408c8a7d0","Type":"ContainerDied","Data":"e010c8895359dcc0073c1792d1557d4444000b6b1e9d399bf3fd175cfaa42f9a"} Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.789398 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e010c8895359dcc0073c1792d1557d4444000b6b1e9d399bf3fd175cfaa42f9a" Oct 06 21:49:16 crc kubenswrapper[5014]: I1006 21:49:16.789484 5014 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-wmc4z" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.201892 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-wmc4z"] Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.214492 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-wmc4z"] Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.313795 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-27f2p"] Oct 06 21:49:17 crc kubenswrapper[5014]: E1006 21:49:17.314158 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdebd302-11d5-44be-9893-f9f408c8a7d0" containerName="keystone-bootstrap" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.314176 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdebd302-11d5-44be-9893-f9f408c8a7d0" containerName="keystone-bootstrap" Oct 06 21:49:17 crc kubenswrapper[5014]: E1006 21:49:17.314211 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a646f3e-89a6-4157-9b41-eefd507dd76e" containerName="init" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.314218 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a646f3e-89a6-4157-9b41-eefd507dd76e" containerName="init" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.314391 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdebd302-11d5-44be-9893-f9f408c8a7d0" containerName="keystone-bootstrap" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.314408 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a646f3e-89a6-4157-9b41-eefd507dd76e" containerName="init" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.315065 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.320657 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.320724 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.320925 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-xch9b" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.321037 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.336186 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-27f2p"] Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.492876 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-config-data\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.493039 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fck42\" (UniqueName: \"kubernetes.io/projected/ada193c9-b872-4490-bb95-a27e9f542aec-kube-api-access-fck42\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.493120 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-credential-keys\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.493220 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-scripts\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.493254 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-combined-ca-bundle\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.493323 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-fernet-keys\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.493945 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cdebd302-11d5-44be-9893-f9f408c8a7d0" path="/var/lib/kubelet/pods/cdebd302-11d5-44be-9893-f9f408c8a7d0/volumes" Oct 06 21:49:17 crc 
kubenswrapper[5014]: I1006 21:49:17.594894 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-scripts\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.594943 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-combined-ca-bundle\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.595303 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-fernet-keys\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.595331 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-config-data\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.595398 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fck42\" (UniqueName: \"kubernetes.io/projected/ada193c9-b872-4490-bb95-a27e9f542aec-kube-api-access-fck42\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.595442 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-credential-keys\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.600453 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-credential-keys\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.603037 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-fernet-keys\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.611049 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-config-data\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.611504 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-scripts\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.611741 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-combined-ca-bundle\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.613450 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fck42\" (UniqueName: \"kubernetes.io/projected/ada193c9-b872-4490-bb95-a27e9f542aec-kube-api-access-fck42\") pod \"keystone-bootstrap-27f2p\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") " pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:17 crc kubenswrapper[5014]: I1006 21:49:17.638643 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-27f2p" Oct 06 21:49:22 crc kubenswrapper[5014]: I1006 21:49:22.671160 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" podUID="0eea5313-8d2a-4066-9896-15ed6ada8e4a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: i/o timeout" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.249332 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.257030 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.273499 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403047 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-scripts\") pod \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403095 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-ovsdbserver-sb\") pod \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403133 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-config\") pod \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403182 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-ovsdbserver-nb\") pod \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403205 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mrtsw\" (UniqueName: \"kubernetes.io/projected/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-kube-api-access-mrtsw\") pod \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403232 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-combined-ca-bundle\") pod \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403266 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-httpd-run\") pod \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403287 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-scripts\") pod \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403321 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-logs\") pod \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403356 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-public-tls-certs\") pod \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " Oct 
06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403370 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-config-data\") pod \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403404 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403430 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-logs\") pod \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403477 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-dns-svc\") pod \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403496 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-dns-swift-storage-0\") pod \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403515 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-config-data\") pod \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403573 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-internal-tls-certs\") pod \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\" (UID: \"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403603 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tgb7h\" (UniqueName: \"kubernetes.io/projected/0eea5313-8d2a-4066-9896-15ed6ada8e4a-kube-api-access-tgb7h\") pod \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\" (UID: \"0eea5313-8d2a-4066-9896-15ed6ada8e4a\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403632 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403676 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zx88\" (UniqueName: \"kubernetes.io/projected/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-kube-api-access-6zx88\") pod \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403693 5014 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-combined-ca-bundle\") pod \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.403720 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-httpd-run\") pod \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\" (UID: \"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb\") " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.404944 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" (UID: "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.405089 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" (UID: "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.405445 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-logs" (OuterVolumeSpecName: "logs") pod "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" (UID: "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.410369 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-scripts" (OuterVolumeSpecName: "scripts") pod "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" (UID: "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.410546 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-scripts" (OuterVolumeSpecName: "scripts") pod "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" (UID: "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.410592 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0eea5313-8d2a-4066-9896-15ed6ada8e4a-kube-api-access-tgb7h" (OuterVolumeSpecName: "kube-api-access-tgb7h") pod "0eea5313-8d2a-4066-9896-15ed6ada8e4a" (UID: "0eea5313-8d2a-4066-9896-15ed6ada8e4a"). InnerVolumeSpecName "kube-api-access-tgb7h". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.414145 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-kube-api-access-6zx88" (OuterVolumeSpecName: "kube-api-access-6zx88") pod "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" (UID: "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb"). 
InnerVolumeSpecName "kube-api-access-6zx88". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.417483 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-kube-api-access-mrtsw" (OuterVolumeSpecName: "kube-api-access-mrtsw") pod "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" (UID: "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7"). InnerVolumeSpecName "kube-api-access-mrtsw". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.418286 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-logs" (OuterVolumeSpecName: "logs") pod "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" (UID: "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.420087 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" (UID: "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.432131 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" (UID: "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.451248 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" (UID: "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.470204 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" (UID: "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.480089 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-config" (OuterVolumeSpecName: "config") pod "0eea5313-8d2a-4066-9896-15ed6ada8e4a" (UID: "0eea5313-8d2a-4066-9896-15ed6ada8e4a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.481132 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0eea5313-8d2a-4066-9896-15ed6ada8e4a" (UID: "0eea5313-8d2a-4066-9896-15ed6ada8e4a"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.490277 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0eea5313-8d2a-4066-9896-15ed6ada8e4a" (UID: "0eea5313-8d2a-4066-9896-15ed6ada8e4a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.502669 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" (UID: "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.505432 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" (UID: "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.505433 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-config-data" (OuterVolumeSpecName: "config-data") pod "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" (UID: "4f4181c2-debb-4a8d-a1ab-1a8d88d087c7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.507912 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.507939 5014 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.507967 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tgb7h\" (UniqueName: \"kubernetes.io/projected/0eea5313-8d2a-4066-9896-15ed6ada8e4a-kube-api-access-tgb7h\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.507999 5014 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.508009 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zx88\" (UniqueName: \"kubernetes.io/projected/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-kube-api-access-6zx88\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.508019 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.508058 5014 reconciler_common.go:293] "Volume detached for 
volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.508067 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.508076 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.508085 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.508093 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.508105 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mrtsw\" (UniqueName: \"kubernetes.io/projected/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-kube-api-access-mrtsw\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.508113 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.508121 5014 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.508129 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.508139 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.508149 5014 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.508170 5014 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.508178 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.509901 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0eea5313-8d2a-4066-9896-15ed6ada8e4a" 
(UID: "0eea5313-8d2a-4066-9896-15ed6ada8e4a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.510558 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0eea5313-8d2a-4066-9896-15ed6ada8e4a" (UID: "0eea5313-8d2a-4066-9896-15ed6ada8e4a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.535920 5014 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.551353 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-config-data" (OuterVolumeSpecName: "config-data") pod "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" (UID: "8f232509-c660-4a06-bdd0-8c7f3ff6a0fb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.553625 5014 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.609970 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.610015 5014 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.610028 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.610040 5014 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0eea5313-8d2a-4066-9896-15ed6ada8e4a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.610055 5014 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.863921 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4f4181c2-debb-4a8d-a1ab-1a8d88d087c7","Type":"ContainerDied","Data":"b23cafbb54020dbc74ab857644905063599b40681a6ffef9709685b064208743"} Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.863988 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.864029 5014 scope.go:117] "RemoveContainer" containerID="92ed03187c2f863f58c94cfdba111b7c6b0d85d7dc930679ea3e14dfa1d61408" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.870830 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f232509-c660-4a06-bdd0-8c7f3ff6a0fb","Type":"ContainerDied","Data":"5098e302cf329e07daa489e84f923cb66c1dbd530b401d1a69f4f3b1eaf7d786"} Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.870877 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.875246 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" event={"ID":"0eea5313-8d2a-4066-9896-15ed6ada8e4a","Type":"ContainerDied","Data":"161fe1edefec2c5541287e723a41958fcebe30a2d25688c7bc43da706a4f97d6"} Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.875383 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.920269 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.938904 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.946713 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59f45f6cf7-cmxts"] Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.966439 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-59f45f6cf7-cmxts"] Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.978096 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Oct 06 21:49:23 crc kubenswrapper[5014]: E1006 21:49:23.978540 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eea5313-8d2a-4066-9896-15ed6ada8e4a" containerName="dnsmasq-dns" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.978553 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eea5313-8d2a-4066-9896-15ed6ada8e4a" containerName="dnsmasq-dns" Oct 06 21:49:23 crc kubenswrapper[5014]: E1006 21:49:23.978569 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eea5313-8d2a-4066-9896-15ed6ada8e4a" containerName="init" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.978574 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eea5313-8d2a-4066-9896-15ed6ada8e4a" containerName="init" Oct 06 21:49:23 crc kubenswrapper[5014]: E1006 21:49:23.978584 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" containerName="glance-httpd" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.978590 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" containerName="glance-httpd" Oct 06 21:49:23 crc kubenswrapper[5014]: E1006 21:49:23.978606 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" containerName="glance-log" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.978611 5014 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" containerName="glance-log" Oct 06 21:49:23 crc kubenswrapper[5014]: E1006 21:49:23.978648 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" containerName="glance-log" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.978655 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" containerName="glance-log" Oct 06 21:49:23 crc kubenswrapper[5014]: E1006 21:49:23.978663 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" containerName="glance-httpd" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.978668 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" containerName="glance-httpd" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.978831 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="0eea5313-8d2a-4066-9896-15ed6ada8e4a" containerName="dnsmasq-dns" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.978847 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" containerName="glance-httpd" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.978865 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" containerName="glance-log" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.978877 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" containerName="glance-httpd" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.978886 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" containerName="glance-log" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.980035 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.983457 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.983759 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.985427 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-wr8xs" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.987215 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Oct 06 21:49:23 crc kubenswrapper[5014]: I1006 21:49:23.990559 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.004532 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.013135 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.025366 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.027434 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.033799 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.034049 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.034315 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132014 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132060 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132090 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-logs\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132111 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-config-data\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132125 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132144 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132161 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhwg5\" (UniqueName: \"kubernetes.io/projected/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-kube-api-access-nhwg5\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132272 5014 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h68kf\" (UniqueName: \"kubernetes.io/projected/97665fa2-3321-4c58-b469-d19238d1d8fa-kube-api-access-h68kf\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132361 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/97665fa2-3321-4c58-b469-d19238d1d8fa-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132394 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-scripts\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132421 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132501 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97665fa2-3321-4c58-b469-d19238d1d8fa-logs\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132645 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132759 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-config-data\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132806 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.132861 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-scripts\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 
21:49:24.234231 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.234282 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-config-data\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.234305 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.234328 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-scripts\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.234524 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.234594 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.234677 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-logs\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.234701 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-config-data\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.234718 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.234735 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.234751 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhwg5\" (UniqueName: \"kubernetes.io/projected/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-kube-api-access-nhwg5\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.234772 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h68kf\" (UniqueName: \"kubernetes.io/projected/97665fa2-3321-4c58-b469-d19238d1d8fa-kube-api-access-h68kf\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.234798 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/97665fa2-3321-4c58-b469-d19238d1d8fa-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.234813 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.234828 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-scripts\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.234853 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97665fa2-3321-4c58-b469-d19238d1d8fa-logs\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.235155 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.235478 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97665fa2-3321-4c58-b469-d19238d1d8fa-logs\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.235594 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") 
pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.235873 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/97665fa2-3321-4c58-b469-d19238d1d8fa-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.236002 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-logs\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.236492 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.241160 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.244107 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-scripts\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.254888 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.254934 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-scripts\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.255128 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.255402 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " 
pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.255886 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h68kf\" (UniqueName: \"kubernetes.io/projected/97665fa2-3321-4c58-b469-d19238d1d8fa-kube-api-access-h68kf\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.256264 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-config-data\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.256980 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhwg5\" (UniqueName: \"kubernetes.io/projected/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-kube-api-access-nhwg5\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.263689 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-config-data\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.285810 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") " pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.340409 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.343532 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.599556 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.736712 5014 scope.go:117] "RemoveContainer" containerID="424aadf618aa3a87294110b8fbdb2352e5697ea4a87e4efffead15575896ec54" Oct 06 21:49:24 crc kubenswrapper[5014]: E1006 21:49:24.755885 5014 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:85c75d60e1bd2f8a9ea0a2bb21a8df64c0a6f7b504cc1a05a355981d4b90e92f" Oct 06 21:49:24 crc kubenswrapper[5014]: E1006 21:49:24.756038 5014 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:85c75d60e1bd2f8a9ea0a2bb21a8df64c0a6f7b504cc1a05a355981d4b90e92f,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wwzdw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-79rxq_openstack(b198c4e1-6133-4729-b58a-c83946d45a5d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 06 21:49:24 crc kubenswrapper[5014]: E1006 21:49:24.757566 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc 
= copying config: context canceled\"" pod="openstack/cinder-db-sync-79rxq" podUID="b198c4e1-6133-4729-b58a-c83946d45a5d" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.817098 5014 scope.go:117] "RemoveContainer" containerID="1cd76d95afc981f88a39a19c64a99d3a6493c08c25f993c5b4df9c93f656ebcd" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.894904 5014 scope.go:117] "RemoveContainer" containerID="8805433cbecafc1e70dc7ad5f1f319d86ad7748c493900543deaf3f0efedc1c8" Oct 06 21:49:24 crc kubenswrapper[5014]: E1006 21:49:24.901162 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:85c75d60e1bd2f8a9ea0a2bb21a8df64c0a6f7b504cc1a05a355981d4b90e92f\\\"\"" pod="openstack/cinder-db-sync-79rxq" podUID="b198c4e1-6133-4729-b58a-c83946d45a5d" Oct 06 21:49:24 crc kubenswrapper[5014]: I1006 21:49:24.938211 5014 scope.go:117] "RemoveContainer" containerID="f048351e68f473e6b5e9a6811cebb92cb76348c3c9a8ea09e0cf146f801628ef" Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.000849 5014 scope.go:117] "RemoveContainer" containerID="84d8b33cd4ef3659cebede0f811eb719bc8a0b8766890078700632fd9a6dca70" Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.280391 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-27f2p"] Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.462313 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.500606 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0eea5313-8d2a-4066-9896-15ed6ada8e4a" path="/var/lib/kubelet/pods/0eea5313-8d2a-4066-9896-15ed6ada8e4a/volumes" Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.503242 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f4181c2-debb-4a8d-a1ab-1a8d88d087c7" path="/var/lib/kubelet/pods/4f4181c2-debb-4a8d-a1ab-1a8d88d087c7/volumes" Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.505509 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f232509-c660-4a06-bdd0-8c7f3ff6a0fb" path="/var/lib/kubelet/pods/8f232509-c660-4a06-bdd0-8c7f3ff6a0fb/volumes" Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.559732 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 06 21:49:25 crc kubenswrapper[5014]: W1006 21:49:25.565147 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod97665fa2_3321_4c58_b469_d19238d1d8fa.slice/crio-1bf245465c6b7e23fee3020e49c989301c2154a31a605e19250cd11487d99264 WatchSource:0}: Error finding container 1bf245465c6b7e23fee3020e49c989301c2154a31a605e19250cd11487d99264: Status 404 returned error can't find the container with id 1bf245465c6b7e23fee3020e49c989301c2154a31a605e19250cd11487d99264 Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.919367 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-27f2p" event={"ID":"ada193c9-b872-4490-bb95-a27e9f542aec","Type":"ContainerStarted","Data":"d77fd7ac2112ded27147e4409627c93dcae3392a0e112526dd74d8acaacc61c5"} Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.919809 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-27f2p" 
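[Editorial note] The 21:49:24 entries above show the full image-pull failure path for cinder-db-sync: PullImage fails with a canceled RPC, kuberuntime_manager logs the unhandled container-start error, and pod_workers then records ErrImagePull followed by ImagePullBackOff on the retry. A small helper for pulling those pod_workers failures out of a log like this one (a sketch in plain Go; the regex is an assumption based on the exact line format above, and it tolerates the backslash-escaped quotes inside the err="..." payload):

    package main

    import (
    	"bufio"
    	"fmt"
    	"os"
    	"regexp"
    )

    // Captures the err="..." payload (which may contain \" escapes) and the pod
    // reference from "Error syncing pod, skipping" lines.
    var errLine = regexp.MustCompile(`"Error syncing pod, skipping" err="((?:[^"\\]|\\.)*)" pod="([^"]+)"`)

    func main() {
    	counts := map[string]int{}
    	sc := bufio.NewScanner(os.Stdin)
    	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
    	for sc.Scan() {
    		if m := errLine.FindStringSubmatch(sc.Text()); m != nil {
    			counts[m[2]]++
    			fmt.Printf("%s\t%s\n", m[2], m[1])
    		}
    	}
    	for pod, n := range counts {
    		fmt.Printf("%d sync failure(s) for %s\n", n, pod)
    	}
    }

On this section it would report two failures for openstack/cinder-db-sync-79rxq: one ErrImagePull and one ImagePullBackOff.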
event={"ID":"ada193c9-b872-4490-bb95-a27e9f542aec","Type":"ContainerStarted","Data":"a4902fb435da4b9c46f750fe3b12c0c94c02bc0deb4c10b797d9c2cdf3f5aa1d"} Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.945248 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-62g9k" event={"ID":"b4ee6065-52e4-434d-a944-a56539092b3b","Type":"ContainerStarted","Data":"ca142d670e607e0c0275fd475dcb9a118d4430b891c9a73969f6dff49fe7e95f"} Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.949548 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-27f2p" podStartSLOduration=8.949525777 podStartE2EDuration="8.949525777s" podCreationTimestamp="2025-10-06 21:49:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:25.94333434 +0000 UTC m=+1111.236371084" watchObservedRunningTime="2025-10-06 21:49:25.949525777 +0000 UTC m=+1111.242562511" Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.949576 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"97665fa2-3321-4c58-b469-d19238d1d8fa","Type":"ContainerStarted","Data":"1bf245465c6b7e23fee3020e49c989301c2154a31a605e19250cd11487d99264"} Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.951595 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2","Type":"ContainerStarted","Data":"05301631f8a9771d5598613b5e3187b428ab613a73326585258ad97fa8ae7bd0"} Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.954222 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8a58cf95-3f4c-4369-acbf-117df5f667be","Type":"ContainerStarted","Data":"8704f70d1a64b2ccd55f5a03a5d7a1871a20054703ef04bead5ca8fd6eb3c37f"} Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.964303 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-62g9k" podStartSLOduration=3.488329347 podStartE2EDuration="23.964286835s" podCreationTimestamp="2025-10-06 21:49:02 +0000 UTC" firstStartedPulling="2025-10-06 21:49:04.277592706 +0000 UTC m=+1089.570629440" lastFinishedPulling="2025-10-06 21:49:24.753550194 +0000 UTC m=+1110.046586928" observedRunningTime="2025-10-06 21:49:25.960103793 +0000 UTC m=+1111.253140547" watchObservedRunningTime="2025-10-06 21:49:25.964286835 +0000 UTC m=+1111.257323569" Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.976384 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-rf5xr" event={"ID":"6386c486-dff3-4e2a-8312-d14c0b3ba0a5","Type":"ContainerStarted","Data":"f8c2374c05a9acca51f59d8cc1f112466396ec752bdd6d59ce4f4833fadceac7"} Oct 06 21:49:25 crc kubenswrapper[5014]: I1006 21:49:25.991086 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-rf5xr" podStartSLOduration=3.54550303 podStartE2EDuration="23.991069194s" podCreationTimestamp="2025-10-06 21:49:02 +0000 UTC" firstStartedPulling="2025-10-06 21:49:04.29188353 +0000 UTC m=+1089.584920264" lastFinishedPulling="2025-10-06 21:49:24.737449684 +0000 UTC m=+1110.030486428" observedRunningTime="2025-10-06 21:49:25.990762395 +0000 UTC m=+1111.283799149" watchObservedRunningTime="2025-10-06 21:49:25.991069194 +0000 UTC m=+1111.284105928" Oct 06 21:49:27 crc kubenswrapper[5014]: I1006 
Oct 06 21:49:27 crc kubenswrapper[5014]: I1006 21:49:27.008840 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2","Type":"ContainerStarted","Data":"0430c60c323101c1b753f11e9d36d10e421145df1577e22216b217a8af949311"}
Oct 06 21:49:27 crc kubenswrapper[5014]: I1006 21:49:27.011398 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"97665fa2-3321-4c58-b469-d19238d1d8fa","Type":"ContainerStarted","Data":"d1df02609b41c237d2564b4493e66e156d3221a07169d32ca23800175d82bca0"}
Oct 06 21:49:27 crc kubenswrapper[5014]: I1006 21:49:27.671879 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-59f45f6cf7-cmxts" podUID="0eea5313-8d2a-4066-9896-15ed6ada8e4a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: i/o timeout"
Oct 06 21:49:28 crc kubenswrapper[5014]: I1006 21:49:28.025520 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8a58cf95-3f4c-4369-acbf-117df5f667be","Type":"ContainerStarted","Data":"c7ce71fae308be7a43355aa94b0d535ffb0ce183d6608339015ec6523d282785"}
Oct 06 21:49:28 crc kubenswrapper[5014]: I1006 21:49:28.029341 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"97665fa2-3321-4c58-b469-d19238d1d8fa","Type":"ContainerStarted","Data":"ba73765393bceab8b12f6265873f17ba00941d5cc2b2b7ce28e6e96fee51409a"}
Oct 06 21:49:28 crc kubenswrapper[5014]: I1006 21:49:28.039851 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2","Type":"ContainerStarted","Data":"e615d6fc74fa40caf082453baa3394e1e1aab609fbfb956be5bef45109329f1f"}
Oct 06 21:49:28 crc kubenswrapper[5014]: I1006 21:49:28.062167 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.06214923 podStartE2EDuration="5.06214923s" podCreationTimestamp="2025-10-06 21:49:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:28.057799582 +0000 UTC m=+1113.350836316" watchObservedRunningTime="2025-10-06 21:49:28.06214923 +0000 UTC m=+1113.355185964"
Oct 06 21:49:29 crc kubenswrapper[5014]: I1006 21:49:29.058970 5014 generic.go:334] "Generic (PLEG): container finished" podID="ada193c9-b872-4490-bb95-a27e9f542aec" containerID="d77fd7ac2112ded27147e4409627c93dcae3392a0e112526dd74d8acaacc61c5" exitCode=0
Oct 06 21:49:29 crc kubenswrapper[5014]: I1006 21:49:29.059981 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-27f2p" event={"ID":"ada193c9-b872-4490-bb95-a27e9f542aec","Type":"ContainerDied","Data":"d77fd7ac2112ded27147e4409627c93dcae3392a0e112526dd74d8acaacc61c5"}
Oct 06 21:49:29 crc kubenswrapper[5014]: I1006 21:49:29.088123 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.088107869 podStartE2EDuration="6.088107869s" podCreationTimestamp="2025-10-06 21:49:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:28.088992632 +0000 UTC m=+1113.382029366" watchObservedRunningTime="2025-10-06 21:49:29.088107869 +0000 UTC m=+1114.381144593"
Oct 06 21:49:30 crc kubenswrapper[5014]: I1006 21:49:30.071066 5014 generic.go:334] "Generic (PLEG): container finished" podID="6386c486-dff3-4e2a-8312-d14c0b3ba0a5" containerID="f8c2374c05a9acca51f59d8cc1f112466396ec752bdd6d59ce4f4833fadceac7" exitCode=0
Oct 06 21:49:30 crc kubenswrapper[5014]: I1006 21:49:30.071107 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-rf5xr" event={"ID":"6386c486-dff3-4e2a-8312-d14c0b3ba0a5","Type":"ContainerDied","Data":"f8c2374c05a9acca51f59d8cc1f112466396ec752bdd6d59ce4f4833fadceac7"}
Oct 06 21:49:30 crc kubenswrapper[5014]: I1006 21:49:30.073574 5014 generic.go:334] "Generic (PLEG): container finished" podID="b4ee6065-52e4-434d-a944-a56539092b3b" containerID="ca142d670e607e0c0275fd475dcb9a118d4430b891c9a73969f6dff49fe7e95f" exitCode=0
Oct 06 21:49:30 crc kubenswrapper[5014]: I1006 21:49:30.073633 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-62g9k" event={"ID":"b4ee6065-52e4-434d-a944-a56539092b3b","Type":"ContainerDied","Data":"ca142d670e607e0c0275fd475dcb9a118d4430b891c9a73969f6dff49fe7e95f"}
Oct 06 21:49:30 crc kubenswrapper[5014]: I1006 21:49:30.075421 5014 generic.go:334] "Generic (PLEG): container finished" podID="fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6" containerID="d027fc574ed3e1780f86823b64d54c340ae29116db94cb57a95a5b184ff616b8" exitCode=0
Oct 06 21:49:30 crc kubenswrapper[5014]: I1006 21:49:30.075453 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-pwpps" event={"ID":"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6","Type":"ContainerDied","Data":"d027fc574ed3e1780f86823b64d54c340ae29116db94cb57a95a5b184ff616b8"}
Oct 06 21:49:31 crc kubenswrapper[5014]: I1006 21:49:31.986097 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-pwpps"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.012580 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-rf5xr"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.035615 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-62g9k"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.067989 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-27f2p"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.105255 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-27f2p" event={"ID":"ada193c9-b872-4490-bb95-a27e9f542aec","Type":"ContainerDied","Data":"a4902fb435da4b9c46f750fe3b12c0c94c02bc0deb4c10b797d9c2cdf3f5aa1d"}
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.105298 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4902fb435da4b9c46f750fe3b12c0c94c02bc0deb4c10b797d9c2cdf3f5aa1d"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.105338 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-27f2p"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.108805 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-62g9k"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.109182 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-62g9k" event={"ID":"b4ee6065-52e4-434d-a944-a56539092b3b","Type":"ContainerDied","Data":"413ff7ac494343f95ff9b2a16d6e1c8a03c2f50ce61383e2644ff72503ac0bef"}
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.109218 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="413ff7ac494343f95ff9b2a16d6e1c8a03c2f50ce61383e2644ff72503ac0bef"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.113649 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ee6065-52e4-434d-a944-a56539092b3b-combined-ca-bundle\") pod \"b4ee6065-52e4-434d-a944-a56539092b3b\" (UID: \"b4ee6065-52e4-434d-a944-a56539092b3b\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.113772 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-logs\") pod \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.113810 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-combined-ca-bundle\") pod \"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6\" (UID: \"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.113865 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fck42\" (UniqueName: \"kubernetes.io/projected/ada193c9-b872-4490-bb95-a27e9f542aec-kube-api-access-fck42\") pod \"ada193c9-b872-4490-bb95-a27e9f542aec\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.113949 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-scripts\") pod \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.113983 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-combined-ca-bundle\") pod \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.114019 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-credential-keys\") pod \"ada193c9-b872-4490-bb95-a27e9f542aec\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.114057 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b4ee6065-52e4-434d-a944-a56539092b3b-db-sync-config-data\") pod \"b4ee6065-52e4-434d-a944-a56539092b3b\" (UID: \"b4ee6065-52e4-434d-a944-a56539092b3b\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.114106 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4blm\" (UniqueName: \"kubernetes.io/projected/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-kube-api-access-x4blm\") pod \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.114162 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt76s\" (UniqueName: \"kubernetes.io/projected/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-kube-api-access-vt76s\") pod \"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6\" (UID: \"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.114212 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-combined-ca-bundle\") pod \"ada193c9-b872-4490-bb95-a27e9f542aec\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.114262 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-config\") pod \"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6\" (UID: \"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.114290 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-798m2\" (UniqueName: \"kubernetes.io/projected/b4ee6065-52e4-434d-a944-a56539092b3b-kube-api-access-798m2\") pod \"b4ee6065-52e4-434d-a944-a56539092b3b\" (UID: \"b4ee6065-52e4-434d-a944-a56539092b3b\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.114323 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-scripts\") pod \"ada193c9-b872-4490-bb95-a27e9f542aec\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.114364 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-config-data\") pod \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\" (UID: \"6386c486-dff3-4e2a-8312-d14c0b3ba0a5\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.114409 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-fernet-keys\") pod \"ada193c9-b872-4490-bb95-a27e9f542aec\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.114452 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-config-data\") pod \"ada193c9-b872-4490-bb95-a27e9f542aec\" (UID: \"ada193c9-b872-4490-bb95-a27e9f542aec\") "
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.115766 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-pwpps" event={"ID":"fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6","Type":"ContainerDied","Data":"60305645a65e7cb1080ea012f970c90e8f148b84776e5a899a0be5f4a0a1afe7"}
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.115823 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="60305645a65e7cb1080ea012f970c90e8f148b84776e5a899a0be5f4a0a1afe7"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.115954 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-pwpps"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.123504 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-logs" (OuterVolumeSpecName: "logs") pod "6386c486-dff3-4e2a-8312-d14c0b3ba0a5" (UID: "6386c486-dff3-4e2a-8312-d14c0b3ba0a5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.124968 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-scripts" (OuterVolumeSpecName: "scripts") pod "6386c486-dff3-4e2a-8312-d14c0b3ba0a5" (UID: "6386c486-dff3-4e2a-8312-d14c0b3ba0a5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.135179 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-kube-api-access-x4blm" (OuterVolumeSpecName: "kube-api-access-x4blm") pod "6386c486-dff3-4e2a-8312-d14c0b3ba0a5" (UID: "6386c486-dff3-4e2a-8312-d14c0b3ba0a5"). InnerVolumeSpecName "kube-api-access-x4blm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.136788 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-rf5xr" event={"ID":"6386c486-dff3-4e2a-8312-d14c0b3ba0a5","Type":"ContainerDied","Data":"2bdca24e063c87de3505e5a3d06ff9d2d3f38417c88eb921a053a1bd865b1df8"}
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.136841 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2bdca24e063c87de3505e5a3d06ff9d2d3f38417c88eb921a053a1bd865b1df8"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.136919 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-rf5xr"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.145766 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-kube-api-access-vt76s" (OuterVolumeSpecName: "kube-api-access-vt76s") pod "fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6" (UID: "fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6"). InnerVolumeSpecName "kube-api-access-vt76s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.147848 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ada193c9-b872-4490-bb95-a27e9f542aec-kube-api-access-fck42" (OuterVolumeSpecName: "kube-api-access-fck42") pod "ada193c9-b872-4490-bb95-a27e9f542aec" (UID: "ada193c9-b872-4490-bb95-a27e9f542aec"). InnerVolumeSpecName "kube-api-access-fck42". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.164885 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4ee6065-52e4-434d-a944-a56539092b3b-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b4ee6065-52e4-434d-a944-a56539092b3b" (UID: "b4ee6065-52e4-434d-a944-a56539092b3b"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.171677 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "ada193c9-b872-4490-bb95-a27e9f542aec" (UID: "ada193c9-b872-4490-bb95-a27e9f542aec"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.180134 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-scripts" (OuterVolumeSpecName: "scripts") pod "ada193c9-b872-4490-bb95-a27e9f542aec" (UID: "ada193c9-b872-4490-bb95-a27e9f542aec"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.206372 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-config" (OuterVolumeSpecName: "config") pod "fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6" (UID: "fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.213300 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4ee6065-52e4-434d-a944-a56539092b3b-kube-api-access-798m2" (OuterVolumeSpecName: "kube-api-access-798m2") pod "b4ee6065-52e4-434d-a944-a56539092b3b" (UID: "b4ee6065-52e4-434d-a944-a56539092b3b"). InnerVolumeSpecName "kube-api-access-798m2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.216194 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-config-data" (OuterVolumeSpecName: "config-data") pod "ada193c9-b872-4490-bb95-a27e9f542aec" (UID: "ada193c9-b872-4490-bb95-a27e9f542aec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.216826 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ada193c9-b872-4490-bb95-a27e9f542aec" (UID: "ada193c9-b872-4490-bb95-a27e9f542aec"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.217559 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-logs\") on node \"crc\" DevicePath \"\""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.217594 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fck42\" (UniqueName: \"kubernetes.io/projected/ada193c9-b872-4490-bb95-a27e9f542aec-kube-api-access-fck42\") on node \"crc\" DevicePath \"\""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.217605 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-scripts\") on node \"crc\" DevicePath \"\""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.217627 5014 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-credential-keys\") on node \"crc\" DevicePath \"\""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.217665 5014 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b4ee6065-52e4-434d-a944-a56539092b3b-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.217674 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4blm\" (UniqueName: \"kubernetes.io/projected/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-kube-api-access-x4blm\") on node \"crc\" DevicePath \"\""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.217716 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt76s\" (UniqueName: \"kubernetes.io/projected/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-kube-api-access-vt76s\") on node \"crc\" DevicePath \"\""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.217726 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-config\") on node \"crc\" DevicePath \"\""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.217736 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-798m2\" (UniqueName: \"kubernetes.io/projected/b4ee6065-52e4-434d-a944-a56539092b3b-kube-api-access-798m2\") on node \"crc\" DevicePath \"\""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.217745 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-scripts\") on node \"crc\" DevicePath \"\""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.217753 5014 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-fernet-keys\") on node \"crc\" DevicePath \"\""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.217761 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-config-data\") on node \"crc\" DevicePath \"\""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.225892 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6" (UID: "fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.226712 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ada193c9-b872-4490-bb95-a27e9f542aec" (UID: "ada193c9-b872-4490-bb95-a27e9f542aec"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.228858 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-f4ddcc578-kbrhw"]
Oct 06 21:49:32 crc kubenswrapper[5014]: E1006 21:49:32.229242 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6386c486-dff3-4e2a-8312-d14c0b3ba0a5" containerName="placement-db-sync"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.229263 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="6386c486-dff3-4e2a-8312-d14c0b3ba0a5" containerName="placement-db-sync"
Oct 06 21:49:32 crc kubenswrapper[5014]: E1006 21:49:32.229285 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4ee6065-52e4-434d-a944-a56539092b3b" containerName="barbican-db-sync"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.229292 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4ee6065-52e4-434d-a944-a56539092b3b" containerName="barbican-db-sync"
Oct 06 21:49:32 crc kubenswrapper[5014]: E1006 21:49:32.229308 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ada193c9-b872-4490-bb95-a27e9f542aec" containerName="keystone-bootstrap"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.229356 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ada193c9-b872-4490-bb95-a27e9f542aec" containerName="keystone-bootstrap"
Oct 06 21:49:32 crc kubenswrapper[5014]: E1006 21:49:32.229364 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6" containerName="neutron-db-sync"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.229370 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6" containerName="neutron-db-sync"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.229529 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="6386c486-dff3-4e2a-8312-d14c0b3ba0a5" containerName="placement-db-sync"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.229556 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4ee6065-52e4-434d-a944-a56539092b3b" containerName="barbican-db-sync"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.229566 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ada193c9-b872-4490-bb95-a27e9f542aec" containerName="keystone-bootstrap"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.229580 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6" containerName="neutron-db-sync"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.230584 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f4ddcc578-kbrhw"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.234126 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.236601 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc"
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.251982 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f4ddcc578-kbrhw"]
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.263530 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4ee6065-52e4-434d-a944-a56539092b3b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4ee6065-52e4-434d-a944-a56539092b3b" (UID: "b4ee6065-52e4-434d-a944-a56539092b3b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.264468 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-config-data" (OuterVolumeSpecName: "config-data") pod "6386c486-dff3-4e2a-8312-d14c0b3ba0a5" (UID: "6386c486-dff3-4e2a-8312-d14c0b3ba0a5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.268399 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6386c486-dff3-4e2a-8312-d14c0b3ba0a5" (UID: "6386c486-dff3-4e2a-8312-d14c0b3ba0a5"). InnerVolumeSpecName "combined-ca-bundle".
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.318854 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-scripts\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.318900 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/411ad591-8dad-46ef-8a44-88e86f5c86dd-logs\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.318931 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-config-data\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.318983 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-public-tls-certs\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.319030 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-internal-tls-certs\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.319057 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smdpw\" (UniqueName: \"kubernetes.io/projected/411ad591-8dad-46ef-8a44-88e86f5c86dd-kube-api-access-smdpw\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.319095 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-combined-ca-bundle\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.319138 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.319150 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ee6065-52e4-434d-a944-a56539092b3b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.319160 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.319168 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6386c486-dff3-4e2a-8312-d14c0b3ba0a5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.319177 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ada193c9-b872-4490-bb95-a27e9f542aec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.361575 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6ffd8547bc-7wfft"] Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.362987 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.400178 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6ffd8547bc-7wfft"] Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.421641 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-ovsdbserver-nb\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.421703 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-internal-tls-certs\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.421732 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-dns-swift-storage-0\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.421751 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smdpw\" (UniqueName: \"kubernetes.io/projected/411ad591-8dad-46ef-8a44-88e86f5c86dd-kube-api-access-smdpw\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.421794 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-combined-ca-bundle\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.421855 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-scripts\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc 
kubenswrapper[5014]: I1006 21:49:32.422383 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/411ad591-8dad-46ef-8a44-88e86f5c86dd-logs\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.422411 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-config-data\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.422430 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svqwg\" (UniqueName: \"kubernetes.io/projected/c9302ee6-cd4b-4819-8dfe-c484385007de-kube-api-access-svqwg\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.422469 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-dns-svc\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.422511 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-ovsdbserver-sb\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.422528 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-config\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.422559 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-public-tls-certs\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.423237 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/411ad591-8dad-46ef-8a44-88e86f5c86dd-logs\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.432470 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-internal-tls-certs\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.434656 5014 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-config-data\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.441537 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-combined-ca-bundle\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.442050 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-public-tls-certs\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.444474 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-scripts\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.464183 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smdpw\" (UniqueName: \"kubernetes.io/projected/411ad591-8dad-46ef-8a44-88e86f5c86dd-kube-api-access-smdpw\") pod \"placement-f4ddcc578-kbrhw\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.524839 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-ovsdbserver-nb\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.526952 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-dns-swift-storage-0\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.527201 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svqwg\" (UniqueName: \"kubernetes.io/projected/c9302ee6-cd4b-4819-8dfe-c484385007de-kube-api-access-svqwg\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.527278 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-dns-svc\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.527336 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-ovsdbserver-sb\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.527367 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-config\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.529824 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-ovsdbserver-nb\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.531379 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-dns-swift-storage-0\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.532123 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-dns-svc\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.533345 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-ovsdbserver-sb\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.538250 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-config\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.539188 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-85f7fb587d-lk8cm"] Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.541569 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.546403 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.546687 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-vt98g" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.546979 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.547153 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.557996 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.563252 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svqwg\" (UniqueName: \"kubernetes.io/projected/c9302ee6-cd4b-4819-8dfe-c484385007de-kube-api-access-svqwg\") pod \"dnsmasq-dns-6ffd8547bc-7wfft\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.575587 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-85f7fb587d-lk8cm"] Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.590125 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-849cf44bc5-9qnb4"] Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.591679 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.595090 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-2jlk6" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.595435 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.598297 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.629767 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-httpd-config\") pod \"neutron-85f7fb587d-lk8cm\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.629981 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-combined-ca-bundle\") pod \"neutron-85f7fb587d-lk8cm\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.630033 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pl5ck\" (UniqueName: \"kubernetes.io/projected/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-kube-api-access-pl5ck\") pod \"neutron-85f7fb587d-lk8cm\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 
21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.630063 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-config\") pod \"neutron-85f7fb587d-lk8cm\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.630130 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-ovndb-tls-certs\") pod \"neutron-85f7fb587d-lk8cm\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.632545 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-697f765b44-77s6g"] Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.635760 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.644005 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.665843 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-849cf44bc5-9qnb4"] Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.683541 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-697f765b44-77s6g"] Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.684946 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:32 crc kubenswrapper[5014]: I1006 21:49:32.720241 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6ffd8547bc-7wfft"] Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.731920 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cddb74997-nnd26"] Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.791158 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-combined-ca-bundle\") pod \"barbican-worker-849cf44bc5-9qnb4\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.791225 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-combined-ca-bundle\") pod \"neutron-85f7fb587d-lk8cm\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.791261 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13fe806f-0e4e-4ea7-838b-938c5fe74c99-logs\") pod \"barbican-keystone-listener-697f765b44-77s6g\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.791300 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hz964\" (UniqueName: \"kubernetes.io/projected/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-kube-api-access-hz964\") pod \"barbican-worker-849cf44bc5-9qnb4\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.791344 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pl5ck\" (UniqueName: \"kubernetes.io/projected/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-kube-api-access-pl5ck\") pod \"neutron-85f7fb587d-lk8cm\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.791400 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-config\") pod \"neutron-85f7fb587d-lk8cm\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.791456 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-ovndb-tls-certs\") pod \"neutron-85f7fb587d-lk8cm\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.791501 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-config-data-custom\") pod \"barbican-worker-849cf44bc5-9qnb4\" (UID: 
\"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.791532 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-config-data-custom\") pod \"barbican-keystone-listener-697f765b44-77s6g\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.791559 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-config-data\") pod \"barbican-worker-849cf44bc5-9qnb4\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.791580 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-logs\") pod \"barbican-worker-849cf44bc5-9qnb4\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.791619 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-httpd-config\") pod \"neutron-85f7fb587d-lk8cm\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.791671 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-config-data\") pod \"barbican-keystone-listener-697f765b44-77s6g\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.791696 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-combined-ca-bundle\") pod \"barbican-keystone-listener-697f765b44-77s6g\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.791768 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6d7x\" (UniqueName: \"kubernetes.io/projected/13fe806f-0e4e-4ea7-838b-938c5fe74c99-kube-api-access-k6d7x\") pod \"barbican-keystone-listener-697f765b44-77s6g\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.793127 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.811017 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-httpd-config\") pod \"neutron-85f7fb587d-lk8cm\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.813995 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-combined-ca-bundle\") pod \"neutron-85f7fb587d-lk8cm\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.827461 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-ovndb-tls-certs\") pod \"neutron-85f7fb587d-lk8cm\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.827547 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pl5ck\" (UniqueName: \"kubernetes.io/projected/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-kube-api-access-pl5ck\") pod \"neutron-85f7fb587d-lk8cm\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.842934 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-config\") pod \"neutron-85f7fb587d-lk8cm\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.880852 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cddb74997-nnd26"] Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.919801 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-config-data-custom\") pod \"barbican-worker-849cf44bc5-9qnb4\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.919915 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-ovsdbserver-sb\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.919964 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-config-data-custom\") pod \"barbican-keystone-listener-697f765b44-77s6g\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.919985 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-config-data\") pod \"barbican-worker-849cf44bc5-9qnb4\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.920013 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-logs\") pod \"barbican-worker-849cf44bc5-9qnb4\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.920088 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-ovsdbserver-nb\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.920162 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-config-data\") pod \"barbican-keystone-listener-697f765b44-77s6g\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.920194 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-combined-ca-bundle\") pod \"barbican-keystone-listener-697f765b44-77s6g\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.920347 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6d7x\" (UniqueName: \"kubernetes.io/projected/13fe806f-0e4e-4ea7-838b-938c5fe74c99-kube-api-access-k6d7x\") pod \"barbican-keystone-listener-697f765b44-77s6g\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.920385 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-config\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.920433 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-dns-swift-storage-0\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.920584 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfcmq\" (UniqueName: \"kubernetes.io/projected/8248bd19-6009-4b21-9c52-1d016e1bfef2-kube-api-access-cfcmq\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.920615 
5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-combined-ca-bundle\") pod \"barbican-worker-849cf44bc5-9qnb4\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.920684 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-dns-svc\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.927238 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-logs\") pod \"barbican-worker-849cf44bc5-9qnb4\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.941701 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-config-data-custom\") pod \"barbican-worker-849cf44bc5-9qnb4\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.943580 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-config-data-custom\") pod \"barbican-keystone-listener-697f765b44-77s6g\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.945398 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-combined-ca-bundle\") pod \"barbican-worker-849cf44bc5-9qnb4\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.948068 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-combined-ca-bundle\") pod \"barbican-keystone-listener-697f765b44-77s6g\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.964048 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6d7x\" (UniqueName: \"kubernetes.io/projected/13fe806f-0e4e-4ea7-838b-938c5fe74c99-kube-api-access-k6d7x\") pod \"barbican-keystone-listener-697f765b44-77s6g\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.964861 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-config-data\") pod \"barbican-worker-849cf44bc5-9qnb4\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc 
kubenswrapper[5014]: I1006 21:49:32.964888 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-config-data\") pod \"barbican-keystone-listener-697f765b44-77s6g\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.969782 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13fe806f-0e4e-4ea7-838b-938c5fe74c99-logs\") pod \"barbican-keystone-listener-697f765b44-77s6g\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.969957 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hz964\" (UniqueName: \"kubernetes.io/projected/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-kube-api-access-hz964\") pod \"barbican-worker-849cf44bc5-9qnb4\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:32.983094 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13fe806f-0e4e-4ea7-838b-938c5fe74c99-logs\") pod \"barbican-keystone-listener-697f765b44-77s6g\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.023350 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hz964\" (UniqueName: \"kubernetes.io/projected/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-kube-api-access-hz964\") pod \"barbican-worker-849cf44bc5-9qnb4\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.040711 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-796d5f888-r5f9l"] Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.042411 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.051878 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.084595 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-ovsdbserver-sb\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.084688 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-ovsdbserver-nb\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.084758 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-config\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.084805 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-dns-swift-storage-0\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.084881 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfcmq\" (UniqueName: \"kubernetes.io/projected/8248bd19-6009-4b21-9c52-1d016e1bfef2-kube-api-access-cfcmq\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.084903 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-dns-svc\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.086473 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-dns-svc\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.087509 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-config\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.088435 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-ovsdbserver-sb\") pod 
\"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.089289 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-ovsdbserver-nb\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.090593 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-dns-swift-storage-0\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.091484 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-796d5f888-r5f9l"] Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.120054 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfcmq\" (UniqueName: \"kubernetes.io/projected/8248bd19-6009-4b21-9c52-1d016e1bfef2-kube-api-access-cfcmq\") pod \"dnsmasq-dns-cddb74997-nnd26\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.153097 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8a58cf95-3f4c-4369-acbf-117df5f667be","Type":"ContainerStarted","Data":"024baa570e337a154f170236a97d79d9c8c1a615149a09636a6a026c16bece73"} Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.187275 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34d354c8-b18e-458f-b050-1e3fa676c220-logs\") pod \"barbican-api-796d5f888-r5f9l\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.187517 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-combined-ca-bundle\") pod \"barbican-api-796d5f888-r5f9l\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.187656 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-config-data-custom\") pod \"barbican-api-796d5f888-r5f9l\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.187689 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhldc\" (UniqueName: \"kubernetes.io/projected/34d354c8-b18e-458f-b050-1e3fa676c220-kube-api-access-qhldc\") pod \"barbican-api-796d5f888-r5f9l\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.187718 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-config-data\") pod \"barbican-api-796d5f888-r5f9l\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.288962 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34d354c8-b18e-458f-b050-1e3fa676c220-logs\") pod \"barbican-api-796d5f888-r5f9l\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.289017 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-combined-ca-bundle\") pod \"barbican-api-796d5f888-r5f9l\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.289158 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-config-data-custom\") pod \"barbican-api-796d5f888-r5f9l\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.289190 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhldc\" (UniqueName: \"kubernetes.io/projected/34d354c8-b18e-458f-b050-1e3fa676c220-kube-api-access-qhldc\") pod \"barbican-api-796d5f888-r5f9l\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.289221 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-config-data\") pod \"barbican-api-796d5f888-r5f9l\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.300364 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34d354c8-b18e-458f-b050-1e3fa676c220-logs\") pod \"barbican-api-796d5f888-r5f9l\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.300765 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-78bf4bbdb7-6fpl9"] Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.302398 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.306531 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-combined-ca-bundle\") pod \"barbican-api-796d5f888-r5f9l\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.322016 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-78bf4bbdb7-6fpl9"] Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.323368 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-config-data-custom\") pod \"barbican-api-796d5f888-r5f9l\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.336797 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.337018 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.337469 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.337541 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.337784 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.337874 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-xch9b" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.360374 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhldc\" (UniqueName: \"kubernetes.io/projected/34d354c8-b18e-458f-b050-1e3fa676c220-kube-api-access-qhldc\") pod \"barbican-api-796d5f888-r5f9l\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.394504 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-config-data\") pod \"barbican-api-796d5f888-r5f9l\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.400270 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-public-tls-certs\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.400341 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-internal-tls-certs\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " 
pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.400371 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fc2ts\" (UniqueName: \"kubernetes.io/projected/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-kube-api-access-fc2ts\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.400487 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-credential-keys\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.400545 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-config-data\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.400588 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-fernet-keys\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.400607 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-scripts\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.400660 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-combined-ca-bundle\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.420537 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.431702 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.446843 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:49:33 crc kubenswrapper[5014]: E1006 21:49:33.450980 5014 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4ee6065_52e4_434d_a944_a56539092b3b.slice/crio-413ff7ac494343f95ff9b2a16d6e1c8a03c2f50ce61383e2644ff72503ac0bef\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podada193c9_b872_4490_bb95_a27e9f542aec.slice\": RecentStats: unable to find data in memory cache]" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.502985 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-credential-keys\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.506656 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-config-data\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.506793 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-fernet-keys\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.506864 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-scripts\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.506951 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-combined-ca-bundle\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.507065 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-public-tls-certs\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.507147 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-internal-tls-certs\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.507223 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fc2ts\" (UniqueName: 
\"kubernetes.io/projected/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-kube-api-access-fc2ts\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.505031 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.513685 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-internal-tls-certs\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.523816 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-scripts\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.527317 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-combined-ca-bundle\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.531506 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-config-data\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.532105 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-public-tls-certs\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.532206 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-fernet-keys\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.538733 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fc2ts\" (UniqueName: \"kubernetes.io/projected/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-kube-api-access-fc2ts\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.528069 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-credential-keys\") pod \"keystone-78bf4bbdb7-6fpl9\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.568374 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.581401 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.727644 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6ffd8547bc-7wfft"] Oct 06 21:49:33 crc kubenswrapper[5014]: I1006 21:49:33.733779 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f4ddcc578-kbrhw"] Oct 06 21:49:34 crc kubenswrapper[5014]: I1006 21:49:34.185372 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f4ddcc578-kbrhw" event={"ID":"411ad591-8dad-46ef-8a44-88e86f5c86dd","Type":"ContainerStarted","Data":"1eb9237c7e9f252e8de293577b7af7ea2510be02c4bea5814397c0d261378073"} Oct 06 21:49:34 crc kubenswrapper[5014]: I1006 21:49:34.194058 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" event={"ID":"c9302ee6-cd4b-4819-8dfe-c484385007de","Type":"ContainerStarted","Data":"5715ef7593729c61a2ce9fad9c07f04b0d0b24b374f789c7ab824bfe888d0f46"} Oct 06 21:49:34 crc kubenswrapper[5014]: I1006 21:49:34.259921 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-85f7fb587d-lk8cm"] Oct 06 21:49:34 crc kubenswrapper[5014]: I1006 21:49:34.344278 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:34 crc kubenswrapper[5014]: I1006 21:49:34.344312 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:34 crc kubenswrapper[5014]: I1006 21:49:34.389212 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-796d5f888-r5f9l"] Oct 06 21:49:34 crc kubenswrapper[5014]: W1006 21:49:34.418736 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34d354c8_b18e_458f_b050_1e3fa676c220.slice/crio-975e2caae16a8b8a55c6e92a7e9e1c08dbba34b9215e51beee95bae7cca77bc5 WatchSource:0}: Error finding container 975e2caae16a8b8a55c6e92a7e9e1c08dbba34b9215e51beee95bae7cca77bc5: Status 404 returned error can't find the container with id 975e2caae16a8b8a55c6e92a7e9e1c08dbba34b9215e51beee95bae7cca77bc5 Oct 06 21:49:34 crc kubenswrapper[5014]: I1006 21:49:34.439236 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:34 crc kubenswrapper[5014]: I1006 21:49:34.445472 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:34 crc kubenswrapper[5014]: I1006 21:49:34.600323 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Oct 06 21:49:34 crc kubenswrapper[5014]: I1006 21:49:34.601029 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Oct 06 21:49:34 crc kubenswrapper[5014]: I1006 21:49:34.671918 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Oct 06 21:49:34 crc kubenswrapper[5014]: I1006 21:49:34.686134 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Oct 06 21:49:34 
crc kubenswrapper[5014]: I1006 21:49:34.789877 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cddb74997-nnd26"] Oct 06 21:49:34 crc kubenswrapper[5014]: I1006 21:49:34.831627 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-78bf4bbdb7-6fpl9"] Oct 06 21:49:34 crc kubenswrapper[5014]: I1006 21:49:34.835706 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-849cf44bc5-9qnb4"] Oct 06 21:49:34 crc kubenswrapper[5014]: W1006 21:49:34.844379 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfccac7f9_eeaa_4481_ab49_9e71dd8af79c.slice/crio-5b9550d1305528ae1b51d93dfd6bb36724b1d7e88e1bee75f3e54ad303dd2119 WatchSource:0}: Error finding container 5b9550d1305528ae1b51d93dfd6bb36724b1d7e88e1bee75f3e54ad303dd2119: Status 404 returned error can't find the container with id 5b9550d1305528ae1b51d93dfd6bb36724b1d7e88e1bee75f3e54ad303dd2119 Oct 06 21:49:34 crc kubenswrapper[5014]: I1006 21:49:34.848751 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-697f765b44-77s6g"] Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.235582 5014 generic.go:334] "Generic (PLEG): container finished" podID="c9302ee6-cd4b-4819-8dfe-c484385007de" containerID="863163d64e48f22b45831f39f3b190616d29f7782a4fff4c34bb934a57fc2ff3" exitCode=0 Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.236188 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" event={"ID":"c9302ee6-cd4b-4819-8dfe-c484385007de","Type":"ContainerDied","Data":"863163d64e48f22b45831f39f3b190616d29f7782a4fff4c34bb934a57fc2ff3"} Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.243117 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cddb74997-nnd26" event={"ID":"8248bd19-6009-4b21-9c52-1d016e1bfef2","Type":"ContainerStarted","Data":"d5ac279e7611d1bedaddb17c1db532fd75f28e76675cf5ef7d66092ed4770c6d"} Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.246948 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-697f765b44-77s6g" event={"ID":"13fe806f-0e4e-4ea7-838b-938c5fe74c99","Type":"ContainerStarted","Data":"ee6633ae7d4c2991890663755df7dea48bc2b11a8fff2a845c44928c91a27090"} Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.270873 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85f7fb587d-lk8cm" event={"ID":"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d","Type":"ContainerStarted","Data":"0964dccde0b5c76aeb43c1e10947a261ea6a671b47c328ed331ba8d124f92931"} Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.270967 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85f7fb587d-lk8cm" event={"ID":"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d","Type":"ContainerStarted","Data":"361154f06b2adce70c209fd6d3ee1a7e31b251c0ff0e58387aeb8390a251434a"} Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.270979 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85f7fb587d-lk8cm" event={"ID":"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d","Type":"ContainerStarted","Data":"00347897abfc1251df651a739eeb6d2b11e283731003a9bc54a2174c6b7b8945"} Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.271779 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:49:35 crc kubenswrapper[5014]: 
I1006 21:49:35.285910 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-796d5f888-r5f9l" event={"ID":"34d354c8-b18e-458f-b050-1e3fa676c220","Type":"ContainerStarted","Data":"a46af4adcc218820a252fabecc73325edd071cc0bb28905b46b24aa8c9c50aeb"} Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.285990 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-796d5f888-r5f9l" event={"ID":"34d354c8-b18e-458f-b050-1e3fa676c220","Type":"ContainerStarted","Data":"89f411c4c01f370fbfb681453ae9467dc67b254e4dc44d28603dca0ea82a6281"} Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.286007 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-796d5f888-r5f9l" event={"ID":"34d354c8-b18e-458f-b050-1e3fa676c220","Type":"ContainerStarted","Data":"975e2caae16a8b8a55c6e92a7e9e1c08dbba34b9215e51beee95bae7cca77bc5"} Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.287180 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.287285 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.296418 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-78bf4bbdb7-6fpl9" event={"ID":"fccac7f9-eeaa-4481-ab49-9e71dd8af79c","Type":"ContainerStarted","Data":"5b9550d1305528ae1b51d93dfd6bb36724b1d7e88e1bee75f3e54ad303dd2119"} Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.320852 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-849cf44bc5-9qnb4" event={"ID":"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00","Type":"ContainerStarted","Data":"fc2ee08b550a6d641e24574700b1a69c677041fd959ed673c56e5c5a970cb8f4"} Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.327587 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-85f7fb587d-lk8cm" podStartSLOduration=3.327564906 podStartE2EDuration="3.327564906s" podCreationTimestamp="2025-10-06 21:49:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:35.288811287 +0000 UTC m=+1120.581848011" watchObservedRunningTime="2025-10-06 21:49:35.327564906 +0000 UTC m=+1120.620601630" Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.339300 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-796d5f888-r5f9l" podStartSLOduration=3.339276268 podStartE2EDuration="3.339276268s" podCreationTimestamp="2025-10-06 21:49:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:35.314904465 +0000 UTC m=+1120.607941209" watchObservedRunningTime="2025-10-06 21:49:35.339276268 +0000 UTC m=+1120.632313002" Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.368029 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f4ddcc578-kbrhw" event={"ID":"411ad591-8dad-46ef-8a44-88e86f5c86dd","Type":"ContainerStarted","Data":"e8673788407b2a922e671f5a29648d8cec35c573c85be82d267e82064c4a8203"} Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.368093 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f4ddcc578-kbrhw" 
event={"ID":"411ad591-8dad-46ef-8a44-88e86f5c86dd","Type":"ContainerStarted","Data":"9e1f60c39c5b159a972961d88da6cb37576b5e416b62ae8289bdf2915b1ab1f7"} Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.368205 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.374689 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.374740 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.374755 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.374765 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.374782 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.426117 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-f4ddcc578-kbrhw" podStartSLOduration=3.426093953 podStartE2EDuration="3.426093953s" podCreationTimestamp="2025-10-06 21:49:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:35.41688713 +0000 UTC m=+1120.709923864" watchObservedRunningTime="2025-10-06 21:49:35.426093953 +0000 UTC m=+1120.719130687" Oct 06 21:49:35 crc kubenswrapper[5014]: I1006 21:49:35.970453 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.060312 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-svqwg\" (UniqueName: \"kubernetes.io/projected/c9302ee6-cd4b-4819-8dfe-c484385007de-kube-api-access-svqwg\") pod \"c9302ee6-cd4b-4819-8dfe-c484385007de\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.060378 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-dns-svc\") pod \"c9302ee6-cd4b-4819-8dfe-c484385007de\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.060453 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-ovsdbserver-nb\") pod \"c9302ee6-cd4b-4819-8dfe-c484385007de\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.060530 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-dns-swift-storage-0\") pod \"c9302ee6-cd4b-4819-8dfe-c484385007de\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.060687 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-ovsdbserver-sb\") pod \"c9302ee6-cd4b-4819-8dfe-c484385007de\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.060873 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-config\") pod \"c9302ee6-cd4b-4819-8dfe-c484385007de\" (UID: \"c9302ee6-cd4b-4819-8dfe-c484385007de\") " Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.093684 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9302ee6-cd4b-4819-8dfe-c484385007de-kube-api-access-svqwg" (OuterVolumeSpecName: "kube-api-access-svqwg") pod "c9302ee6-cd4b-4819-8dfe-c484385007de" (UID: "c9302ee6-cd4b-4819-8dfe-c484385007de"). InnerVolumeSpecName "kube-api-access-svqwg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.104190 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c9302ee6-cd4b-4819-8dfe-c484385007de" (UID: "c9302ee6-cd4b-4819-8dfe-c484385007de"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.123077 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c9302ee6-cd4b-4819-8dfe-c484385007de" (UID: "c9302ee6-cd4b-4819-8dfe-c484385007de"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.144411 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c9302ee6-cd4b-4819-8dfe-c484385007de" (UID: "c9302ee6-cd4b-4819-8dfe-c484385007de"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.166717 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.166770 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.166806 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-svqwg\" (UniqueName: \"kubernetes.io/projected/c9302ee6-cd4b-4819-8dfe-c484385007de-kube-api-access-svqwg\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.166825 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.208881 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c9302ee6-cd4b-4819-8dfe-c484385007de" (UID: "c9302ee6-cd4b-4819-8dfe-c484385007de"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.226547 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-config" (OuterVolumeSpecName: "config") pod "c9302ee6-cd4b-4819-8dfe-c484385007de" (UID: "c9302ee6-cd4b-4819-8dfe-c484385007de"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.264100 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-f6fff5c8f-xbgr9"] Oct 06 21:49:36 crc kubenswrapper[5014]: E1006 21:49:36.264884 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9302ee6-cd4b-4819-8dfe-c484385007de" containerName="init" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.264903 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9302ee6-cd4b-4819-8dfe-c484385007de" containerName="init" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.265213 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9302ee6-cd4b-4819-8dfe-c484385007de" containerName="init" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.276847 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.276890 5014 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c9302ee6-cd4b-4819-8dfe-c484385007de-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.328767 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.335543 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-f6fff5c8f-xbgr9"] Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.349451 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.350558 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.395280 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzl4l\" (UniqueName: \"kubernetes.io/projected/7eeb278b-517f-4b26-825e-12d7d0d969ce-kube-api-access-nzl4l\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.395342 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-internal-tls-certs\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.395387 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-public-tls-certs\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.395430 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-httpd-config\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " 
pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.395474 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-config\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.395500 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-combined-ca-bundle\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.395535 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-ovndb-tls-certs\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.413967 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-78bf4bbdb7-6fpl9" event={"ID":"fccac7f9-eeaa-4481-ab49-9e71dd8af79c","Type":"ContainerStarted","Data":"366d9c7d12dc53287d03434a10b7448a9c6d02142fd4ec78034d67e5b49d8e4b"} Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.414153 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.417429 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" event={"ID":"c9302ee6-cd4b-4819-8dfe-c484385007de","Type":"ContainerDied","Data":"5715ef7593729c61a2ce9fad9c07f04b0d0b24b374f789c7ab824bfe888d0f46"} Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.417979 5014 scope.go:117] "RemoveContainer" containerID="863163d64e48f22b45831f39f3b190616d29f7782a4fff4c34bb934a57fc2ff3" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.418188 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6ffd8547bc-7wfft" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.443858 5014 generic.go:334] "Generic (PLEG): container finished" podID="8248bd19-6009-4b21-9c52-1d016e1bfef2" containerID="a024f53b70fe1b3c5654b36136f202ab253bdd2be9ca0595375efe50d2b2a3db" exitCode=0 Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.445294 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cddb74997-nnd26" event={"ID":"8248bd19-6009-4b21-9c52-1d016e1bfef2","Type":"ContainerDied","Data":"a024f53b70fe1b3c5654b36136f202ab253bdd2be9ca0595375efe50d2b2a3db"} Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.450038 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-78bf4bbdb7-6fpl9" podStartSLOduration=3.449966034 podStartE2EDuration="3.449966034s" podCreationTimestamp="2025-10-06 21:49:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:36.437698045 +0000 UTC m=+1121.730734779" watchObservedRunningTime="2025-10-06 21:49:36.449966034 +0000 UTC m=+1121.743002768" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.498175 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-config\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.498228 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-combined-ca-bundle\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.498323 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-ovndb-tls-certs\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.498646 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzl4l\" (UniqueName: \"kubernetes.io/projected/7eeb278b-517f-4b26-825e-12d7d0d969ce-kube-api-access-nzl4l\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.498677 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-internal-tls-certs\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.498724 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-public-tls-certs\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.498856 5014 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-httpd-config\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.507565 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-httpd-config\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.514998 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-config\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.515586 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-ovndb-tls-certs\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.525340 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-internal-tls-certs\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.543706 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-public-tls-certs\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.547700 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-combined-ca-bundle\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.559436 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzl4l\" (UniqueName: \"kubernetes.io/projected/7eeb278b-517f-4b26-825e-12d7d0d969ce-kube-api-access-nzl4l\") pod \"neutron-f6fff5c8f-xbgr9\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") " pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.575671 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6ffd8547bc-7wfft"] Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.593517 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6ffd8547bc-7wfft"] Oct 06 21:49:36 crc kubenswrapper[5014]: I1006 21:49:36.707026 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:37 crc kubenswrapper[5014]: I1006 21:49:37.460435 5014 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 06 21:49:37 crc kubenswrapper[5014]: I1006 21:49:37.461144 5014 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 06 21:49:37 crc kubenswrapper[5014]: I1006 21:49:37.461836 5014 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 06 21:49:37 crc kubenswrapper[5014]: I1006 21:49:37.461856 5014 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 06 21:49:37 crc kubenswrapper[5014]: I1006 21:49:37.461931 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cddb74997-nnd26" event={"ID":"8248bd19-6009-4b21-9c52-1d016e1bfef2","Type":"ContainerStarted","Data":"27eb46dd9464b7adab1e8a72a4c5823879975f08d71d6d76de893edca8a709d9"} Oct 06 21:49:37 crc kubenswrapper[5014]: I1006 21:49:37.461961 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:37 crc kubenswrapper[5014]: I1006 21:49:37.491559 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cddb74997-nnd26" podStartSLOduration=5.491525658 podStartE2EDuration="5.491525658s" podCreationTimestamp="2025-10-06 21:49:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:37.488493112 +0000 UTC m=+1122.781529846" watchObservedRunningTime="2025-10-06 21:49:37.491525658 +0000 UTC m=+1122.784562392" Oct 06 21:49:37 crc kubenswrapper[5014]: I1006 21:49:37.514678 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9302ee6-cd4b-4819-8dfe-c484385007de" path="/var/lib/kubelet/pods/c9302ee6-cd4b-4819-8dfe-c484385007de/volumes" Oct 06 21:49:37 crc kubenswrapper[5014]: I1006 21:49:37.604515 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-f6fff5c8f-xbgr9"] Oct 06 21:49:38 crc kubenswrapper[5014]: W1006 21:49:38.222564 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7eeb278b_517f_4b26_825e_12d7d0d969ce.slice/crio-d4cb4b18ad9fe5bb0744cb2b18016a4335ead96a41d98c32ec1dacef032177fd WatchSource:0}: Error finding container d4cb4b18ad9fe5bb0744cb2b18016a4335ead96a41d98c32ec1dacef032177fd: Status 404 returned error can't find the container with id d4cb4b18ad9fe5bb0744cb2b18016a4335ead96a41d98c32ec1dacef032177fd Oct 06 21:49:38 crc kubenswrapper[5014]: I1006 21:49:38.502911 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f6fff5c8f-xbgr9" event={"ID":"7eeb278b-517f-4b26-825e-12d7d0d969ce","Type":"ContainerStarted","Data":"d4cb4b18ad9fe5bb0744cb2b18016a4335ead96a41d98c32ec1dacef032177fd"} Oct 06 21:49:38 crc kubenswrapper[5014]: I1006 21:49:38.667465 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Oct 06 21:49:38 crc kubenswrapper[5014]: I1006 21:49:38.667555 5014 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 06 21:49:38 crc kubenswrapper[5014]: I1006 21:49:38.844492 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:38 crc kubenswrapper[5014]: I1006 21:49:38.844696 5014 prober_manager.go:312] 
"Failed to trigger a manual run" probe="Readiness" Oct 06 21:49:38 crc kubenswrapper[5014]: I1006 21:49:38.850312 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.188934 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-598975567d-rtcs4"] Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.191236 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.194918 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.195368 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.215811 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-598975567d-rtcs4"] Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.243761 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.294722 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-internal-tls-certs\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.295345 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-combined-ca-bundle\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.295448 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd07ab2-973f-4531-8e5f-68d349e231b4-logs\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.295475 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-config-data-custom\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.295520 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pml5d\" (UniqueName: \"kubernetes.io/projected/0bd07ab2-973f-4531-8e5f-68d349e231b4-kube-api-access-pml5d\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.295541 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-config-data\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.295582 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-public-tls-certs\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.404049 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-internal-tls-certs\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.404468 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-combined-ca-bundle\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.404662 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd07ab2-973f-4531-8e5f-68d349e231b4-logs\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.404798 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-config-data-custom\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.404923 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pml5d\" (UniqueName: \"kubernetes.io/projected/0bd07ab2-973f-4531-8e5f-68d349e231b4-kube-api-access-pml5d\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.405026 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-config-data\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.405129 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-public-tls-certs\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.406070 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/0bd07ab2-973f-4531-8e5f-68d349e231b4-logs\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.409873 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-config-data-custom\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.410869 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-public-tls-certs\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.412816 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-combined-ca-bundle\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.412831 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-config-data\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.413486 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-internal-tls-certs\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.433517 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pml5d\" (UniqueName: \"kubernetes.io/projected/0bd07ab2-973f-4531-8e5f-68d349e231b4-kube-api-access-pml5d\") pod \"barbican-api-598975567d-rtcs4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:39 crc kubenswrapper[5014]: I1006 21:49:39.527463 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:40 crc kubenswrapper[5014]: I1006 21:49:40.248289 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-598975567d-rtcs4"] Oct 06 21:49:40 crc kubenswrapper[5014]: W1006 21:49:40.264191 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0bd07ab2_973f_4531_8e5f_68d349e231b4.slice/crio-54b318fe0eb02a773a8a8e3f52da3411fd766c297175440d422e0326e5e4ff38 WatchSource:0}: Error finding container 54b318fe0eb02a773a8a8e3f52da3411fd766c297175440d422e0326e5e4ff38: Status 404 returned error can't find the container with id 54b318fe0eb02a773a8a8e3f52da3411fd766c297175440d422e0326e5e4ff38 Oct 06 21:49:40 crc kubenswrapper[5014]: I1006 21:49:40.550480 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-697f765b44-77s6g" event={"ID":"13fe806f-0e4e-4ea7-838b-938c5fe74c99","Type":"ContainerStarted","Data":"dec3beff6abbdcf32f4d602873fe4ab229755aef532b1c3308ed69b08438e50e"} Oct 06 21:49:40 crc kubenswrapper[5014]: I1006 21:49:40.550593 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-697f765b44-77s6g" event={"ID":"13fe806f-0e4e-4ea7-838b-938c5fe74c99","Type":"ContainerStarted","Data":"7c61f00177c33532524b8163faef3133d7c88a0c6edeaf8d14eb4a6022f7abdd"} Oct 06 21:49:40 crc kubenswrapper[5014]: I1006 21:49:40.559130 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-598975567d-rtcs4" event={"ID":"0bd07ab2-973f-4531-8e5f-68d349e231b4","Type":"ContainerStarted","Data":"54b318fe0eb02a773a8a8e3f52da3411fd766c297175440d422e0326e5e4ff38"} Oct 06 21:49:40 crc kubenswrapper[5014]: I1006 21:49:40.574072 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f6fff5c8f-xbgr9" event={"ID":"7eeb278b-517f-4b26-825e-12d7d0d969ce","Type":"ContainerStarted","Data":"48a0f810064b04d6c7a9053d863e731b5f0654d50ec97630c69d7ccb6c16e34f"} Oct 06 21:49:40 crc kubenswrapper[5014]: I1006 21:49:40.574227 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:49:40 crc kubenswrapper[5014]: I1006 21:49:40.574267 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f6fff5c8f-xbgr9" event={"ID":"7eeb278b-517f-4b26-825e-12d7d0d969ce","Type":"ContainerStarted","Data":"7313b7e6181c156a532c5a8a8da1000e022b6207ee08165f639a58cf73fc9b6a"} Oct 06 21:49:40 crc kubenswrapper[5014]: I1006 21:49:40.601391 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-849cf44bc5-9qnb4" event={"ID":"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00","Type":"ContainerStarted","Data":"6e30f54354b4e05d72cd38208e8b8588c2363ab84617b170457752ebb404a286"} Oct 06 21:49:40 crc kubenswrapper[5014]: I1006 21:49:40.601463 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-849cf44bc5-9qnb4" event={"ID":"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00","Type":"ContainerStarted","Data":"cc9f5a4202ce6f9f16ab8d8453f50c25c26d08e6f673de106491b3f0b64a0984"} Oct 06 21:49:40 crc kubenswrapper[5014]: I1006 21:49:40.646033 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-697f765b44-77s6g" podStartSLOduration=4.059920251 podStartE2EDuration="8.646005336s" podCreationTimestamp="2025-10-06 21:49:32 +0000 UTC" firstStartedPulling="2025-10-06 21:49:34.888761895 
+0000 UTC m=+1120.181798629" lastFinishedPulling="2025-10-06 21:49:39.47484698 +0000 UTC m=+1124.767883714" observedRunningTime="2025-10-06 21:49:40.588820551 +0000 UTC m=+1125.881857285" watchObservedRunningTime="2025-10-06 21:49:40.646005336 +0000 UTC m=+1125.939042070" Oct 06 21:49:40 crc kubenswrapper[5014]: I1006 21:49:40.654894 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-849cf44bc5-9qnb4" podStartSLOduration=4.037328184 podStartE2EDuration="8.654860836s" podCreationTimestamp="2025-10-06 21:49:32 +0000 UTC" firstStartedPulling="2025-10-06 21:49:34.832613314 +0000 UTC m=+1120.125650048" lastFinishedPulling="2025-10-06 21:49:39.450145966 +0000 UTC m=+1124.743182700" observedRunningTime="2025-10-06 21:49:40.649656601 +0000 UTC m=+1125.942693335" watchObservedRunningTime="2025-10-06 21:49:40.654860836 +0000 UTC m=+1125.947897570" Oct 06 21:49:40 crc kubenswrapper[5014]: I1006 21:49:40.659214 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-f6fff5c8f-xbgr9" podStartSLOduration=4.659194254 podStartE2EDuration="4.659194254s" podCreationTimestamp="2025-10-06 21:49:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:40.621414895 +0000 UTC m=+1125.914451629" watchObservedRunningTime="2025-10-06 21:49:40.659194254 +0000 UTC m=+1125.952230988" Oct 06 21:49:41 crc kubenswrapper[5014]: I1006 21:49:41.616568 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-79rxq" event={"ID":"b198c4e1-6133-4729-b58a-c83946d45a5d","Type":"ContainerStarted","Data":"4cf6f0f2d03df260425ec2e087b93c533a13a88a7decf7ca78408daf3686e46c"} Oct 06 21:49:41 crc kubenswrapper[5014]: I1006 21:49:41.625790 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-598975567d-rtcs4" event={"ID":"0bd07ab2-973f-4531-8e5f-68d349e231b4","Type":"ContainerStarted","Data":"179498d178eaf12fad9776b3020c6b558f9bd713b5bcdcb7b15c957545542e01"} Oct 06 21:49:41 crc kubenswrapper[5014]: I1006 21:49:41.625863 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-598975567d-rtcs4" event={"ID":"0bd07ab2-973f-4531-8e5f-68d349e231b4","Type":"ContainerStarted","Data":"8f5fff8a69f244b622369e3d3222e772ceb03080fc79e98a2126892aa6220818"} Oct 06 21:49:41 crc kubenswrapper[5014]: I1006 21:49:41.627786 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:41 crc kubenswrapper[5014]: I1006 21:49:41.628101 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:41 crc kubenswrapper[5014]: I1006 21:49:41.674158 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-79rxq" podStartSLOduration=4.4891031550000005 podStartE2EDuration="39.674141502s" podCreationTimestamp="2025-10-06 21:49:02 +0000 UTC" firstStartedPulling="2025-10-06 21:49:04.291734615 +0000 UTC m=+1089.584771359" lastFinishedPulling="2025-10-06 21:49:39.476772972 +0000 UTC m=+1124.769809706" observedRunningTime="2025-10-06 21:49:41.661923665 +0000 UTC m=+1126.954960409" watchObservedRunningTime="2025-10-06 21:49:41.674141502 +0000 UTC m=+1126.967178236" Oct 06 21:49:41 crc kubenswrapper[5014]: I1006 21:49:41.702930 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/barbican-api-598975567d-rtcs4" podStartSLOduration=2.702911245 podStartE2EDuration="2.702911245s" podCreationTimestamp="2025-10-06 21:49:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:41.693131415 +0000 UTC m=+1126.986168149" watchObservedRunningTime="2025-10-06 21:49:41.702911245 +0000 UTC m=+1126.995947969" Oct 06 21:49:43 crc kubenswrapper[5014]: I1006 21:49:43.569817 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:49:43 crc kubenswrapper[5014]: I1006 21:49:43.646944 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77dd5cf987-sjkjh"] Oct 06 21:49:43 crc kubenswrapper[5014]: I1006 21:49:43.647181 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" podUID="0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" containerName="dnsmasq-dns" containerID="cri-o://14cfbab9388c4de2eada217b75e563a766b9d49fbb94e8750e2a5fdaaac974a0" gracePeriod=10 Oct 06 21:49:43 crc kubenswrapper[5014]: E1006 21:49:43.877736 5014 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a05bafc_b81d_4f0e_9d9f_2807c0586c9e.slice/crio-14cfbab9388c4de2eada217b75e563a766b9d49fbb94e8750e2a5fdaaac974a0.scope\": RecentStats: unable to find data in memory cache]" Oct 06 21:49:44 crc kubenswrapper[5014]: I1006 21:49:44.684589 5014 generic.go:334] "Generic (PLEG): container finished" podID="0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" containerID="14cfbab9388c4de2eada217b75e563a766b9d49fbb94e8750e2a5fdaaac974a0" exitCode=0 Oct 06 21:49:44 crc kubenswrapper[5014]: I1006 21:49:44.684662 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" event={"ID":"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e","Type":"ContainerDied","Data":"14cfbab9388c4de2eada217b75e563a766b9d49fbb94e8750e2a5fdaaac974a0"} Oct 06 21:49:45 crc kubenswrapper[5014]: I1006 21:49:45.521522 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:46 crc kubenswrapper[5014]: I1006 21:49:46.114773 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.721288 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.743206 5014 generic.go:334] "Generic (PLEG): container finished" podID="b198c4e1-6133-4729-b58a-c83946d45a5d" containerID="4cf6f0f2d03df260425ec2e087b93c533a13a88a7decf7ca78408daf3686e46c" exitCode=0 Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.743276 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-79rxq" event={"ID":"b198c4e1-6133-4729-b58a-c83946d45a5d","Type":"ContainerDied","Data":"4cf6f0f2d03df260425ec2e087b93c533a13a88a7decf7ca78408daf3686e46c"} Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.745270 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" event={"ID":"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e","Type":"ContainerDied","Data":"b0afb201ed5bceee9d10313a5830d8a20e46e87c896313b9f30c5d23a835591c"} Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.745339 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.745630 5014 scope.go:117] "RemoveContainer" containerID="14cfbab9388c4de2eada217b75e563a766b9d49fbb94e8750e2a5fdaaac974a0" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.792885 5014 scope.go:117] "RemoveContainer" containerID="34e130e06f1a53f66bd521eb3f46b5f179a77b35008bb9a6c23af351993228be" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.871498 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nwbq7\" (UniqueName: \"kubernetes.io/projected/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-kube-api-access-nwbq7\") pod \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.871987 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-ovsdbserver-nb\") pod \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.872034 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-config\") pod \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.872132 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-dns-svc\") pod \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.872155 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-dns-swift-storage-0\") pod \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.872232 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-ovsdbserver-sb\") pod 
\"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\" (UID: \"0a05bafc-b81d-4f0e-9d9f-2807c0586c9e\") " Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.876514 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-kube-api-access-nwbq7" (OuterVolumeSpecName: "kube-api-access-nwbq7") pod "0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" (UID: "0a05bafc-b81d-4f0e-9d9f-2807c0586c9e"). InnerVolumeSpecName "kube-api-access-nwbq7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.915671 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" (UID: "0a05bafc-b81d-4f0e-9d9f-2807c0586c9e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.925401 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" (UID: "0a05bafc-b81d-4f0e-9d9f-2807c0586c9e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.938766 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-config" (OuterVolumeSpecName: "config") pod "0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" (UID: "0a05bafc-b81d-4f0e-9d9f-2807c0586c9e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.943091 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" (UID: "0a05bafc-b81d-4f0e-9d9f-2807c0586c9e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.952290 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" (UID: "0a05bafc-b81d-4f0e-9d9f-2807c0586c9e"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.973962 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.973993 5014 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.974006 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.974017 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nwbq7\" (UniqueName: \"kubernetes.io/projected/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-kube-api-access-nwbq7\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.974026 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:48 crc kubenswrapper[5014]: I1006 21:49:48.974036 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:49 crc kubenswrapper[5014]: I1006 21:49:49.112746 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77dd5cf987-sjkjh"] Oct 06 21:49:49 crc kubenswrapper[5014]: I1006 21:49:49.121521 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77dd5cf987-sjkjh"] Oct 06 21:49:49 crc kubenswrapper[5014]: I1006 21:49:49.497842 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" path="/var/lib/kubelet/pods/0a05bafc-b81d-4f0e-9d9f-2807c0586c9e/volumes" Oct 06 21:49:49 crc kubenswrapper[5014]: I1006 21:49:49.755774 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8a58cf95-3f4c-4369-acbf-117df5f667be","Type":"ContainerStarted","Data":"aa292c89c90af0f3ff521929d202a34ac8faf0d61d557bec907a3dbadde539fa"} Oct 06 21:49:49 crc kubenswrapper[5014]: I1006 21:49:49.755940 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="ceilometer-central-agent" containerID="cri-o://8704f70d1a64b2ccd55f5a03a5d7a1871a20054703ef04bead5ca8fd6eb3c37f" gracePeriod=30 Oct 06 21:49:49 crc kubenswrapper[5014]: I1006 21:49:49.756212 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 06 21:49:49 crc kubenswrapper[5014]: I1006 21:49:49.756439 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="proxy-httpd" containerID="cri-o://aa292c89c90af0f3ff521929d202a34ac8faf0d61d557bec907a3dbadde539fa" gracePeriod=30 Oct 06 21:49:49 crc kubenswrapper[5014]: I1006 21:49:49.756483 5014 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/ceilometer-0" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="sg-core" containerID="cri-o://024baa570e337a154f170236a97d79d9c8c1a615149a09636a6a026c16bece73" gracePeriod=30 Oct 06 21:49:49 crc kubenswrapper[5014]: I1006 21:49:49.756515 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="ceilometer-notification-agent" containerID="cri-o://c7ce71fae308be7a43355aa94b0d535ffb0ce183d6608339015ec6523d282785" gracePeriod=30 Oct 06 21:49:49 crc kubenswrapper[5014]: I1006 21:49:49.784840 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.6066077500000002 podStartE2EDuration="47.784819505s" podCreationTimestamp="2025-10-06 21:49:02 +0000 UTC" firstStartedPulling="2025-10-06 21:49:04.291383644 +0000 UTC m=+1089.584420398" lastFinishedPulling="2025-10-06 21:49:48.469595419 +0000 UTC m=+1133.762632153" observedRunningTime="2025-10-06 21:49:49.78150904 +0000 UTC m=+1135.074545784" watchObservedRunningTime="2025-10-06 21:49:49.784819505 +0000 UTC m=+1135.077856239" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.110301 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.206906 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-config-data\") pod \"b198c4e1-6133-4729-b58a-c83946d45a5d\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.206965 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-scripts\") pod \"b198c4e1-6133-4729-b58a-c83946d45a5d\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.207087 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwzdw\" (UniqueName: \"kubernetes.io/projected/b198c4e1-6133-4729-b58a-c83946d45a5d-kube-api-access-wwzdw\") pod \"b198c4e1-6133-4729-b58a-c83946d45a5d\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.207166 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-db-sync-config-data\") pod \"b198c4e1-6133-4729-b58a-c83946d45a5d\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.207266 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-combined-ca-bundle\") pod \"b198c4e1-6133-4729-b58a-c83946d45a5d\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.208025 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b198c4e1-6133-4729-b58a-c83946d45a5d-etc-machine-id\") pod \"b198c4e1-6133-4729-b58a-c83946d45a5d\" (UID: \"b198c4e1-6133-4729-b58a-c83946d45a5d\") " Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.208117 5014 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b198c4e1-6133-4729-b58a-c83946d45a5d-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b198c4e1-6133-4729-b58a-c83946d45a5d" (UID: "b198c4e1-6133-4729-b58a-c83946d45a5d"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.208666 5014 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b198c4e1-6133-4729-b58a-c83946d45a5d-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.214723 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b198c4e1-6133-4729-b58a-c83946d45a5d" (UID: "b198c4e1-6133-4729-b58a-c83946d45a5d"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.214952 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b198c4e1-6133-4729-b58a-c83946d45a5d-kube-api-access-wwzdw" (OuterVolumeSpecName: "kube-api-access-wwzdw") pod "b198c4e1-6133-4729-b58a-c83946d45a5d" (UID: "b198c4e1-6133-4729-b58a-c83946d45a5d"). InnerVolumeSpecName "kube-api-access-wwzdw". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.217820 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-scripts" (OuterVolumeSpecName: "scripts") pod "b198c4e1-6133-4729-b58a-c83946d45a5d" (UID: "b198c4e1-6133-4729-b58a-c83946d45a5d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.232925 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b198c4e1-6133-4729-b58a-c83946d45a5d" (UID: "b198c4e1-6133-4729-b58a-c83946d45a5d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.253878 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-config-data" (OuterVolumeSpecName: "config-data") pod "b198c4e1-6133-4729-b58a-c83946d45a5d" (UID: "b198c4e1-6133-4729-b58a-c83946d45a5d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.311325 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.311378 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.311388 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.311399 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwzdw\" (UniqueName: \"kubernetes.io/projected/b198c4e1-6133-4729-b58a-c83946d45a5d-kube-api-access-wwzdw\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.311434 5014 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b198c4e1-6133-4729-b58a-c83946d45a5d-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.834293 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-79rxq" event={"ID":"b198c4e1-6133-4729-b58a-c83946d45a5d","Type":"ContainerDied","Data":"bc157548cc65148f2aec070d4a827a713e3e84b5bdb1f654b757fe3efe9566e6"} Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.834369 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc157548cc65148f2aec070d4a827a713e3e84b5bdb1f654b757fe3efe9566e6" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.834506 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-79rxq" Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.842413 5014 generic.go:334] "Generic (PLEG): container finished" podID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerID="aa292c89c90af0f3ff521929d202a34ac8faf0d61d557bec907a3dbadde539fa" exitCode=0 Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.842455 5014 generic.go:334] "Generic (PLEG): container finished" podID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerID="024baa570e337a154f170236a97d79d9c8c1a615149a09636a6a026c16bece73" exitCode=2 Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.842464 5014 generic.go:334] "Generic (PLEG): container finished" podID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerID="8704f70d1a64b2ccd55f5a03a5d7a1871a20054703ef04bead5ca8fd6eb3c37f" exitCode=0 Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.842493 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8a58cf95-3f4c-4369-acbf-117df5f667be","Type":"ContainerDied","Data":"aa292c89c90af0f3ff521929d202a34ac8faf0d61d557bec907a3dbadde539fa"} Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.842532 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8a58cf95-3f4c-4369-acbf-117df5f667be","Type":"ContainerDied","Data":"024baa570e337a154f170236a97d79d9c8c1a615149a09636a6a026c16bece73"} Oct 06 21:49:50 crc kubenswrapper[5014]: I1006 21:49:50.842545 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8a58cf95-3f4c-4369-acbf-117df5f667be","Type":"ContainerDied","Data":"8704f70d1a64b2ccd55f5a03a5d7a1871a20054703ef04bead5ca8fd6eb3c37f"} Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.020161 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.020593 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.128570 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Oct 06 21:49:51 crc kubenswrapper[5014]: E1006 21:49:51.128949 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" containerName="init" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.128964 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" containerName="init" Oct 06 21:49:51 crc kubenswrapper[5014]: E1006 21:49:51.128978 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b198c4e1-6133-4729-b58a-c83946d45a5d" containerName="cinder-db-sync" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.128986 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="b198c4e1-6133-4729-b58a-c83946d45a5d" containerName="cinder-db-sync" Oct 06 21:49:51 crc kubenswrapper[5014]: E1006 21:49:51.129003 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" containerName="dnsmasq-dns" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.129009 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" containerName="dnsmasq-dns" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.129387 5014 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" containerName="dnsmasq-dns" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.129405 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="b198c4e1-6133-4729-b58a-c83946d45a5d" containerName="cinder-db-sync" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.130295 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.140414 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.140963 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.141218 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.144383 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-8h4pd" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.167239 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-796d5f888-r5f9l"] Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.167582 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-796d5f888-r5f9l" podUID="34d354c8-b18e-458f-b050-1e3fa676c220" containerName="barbican-api-log" containerID="cri-o://89f411c4c01f370fbfb681453ae9467dc67b254e4dc44d28603dca0ea82a6281" gracePeriod=30 Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.167761 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-796d5f888-r5f9l" podUID="34d354c8-b18e-458f-b050-1e3fa676c220" containerName="barbican-api" containerID="cri-o://a46af4adcc218820a252fabecc73325edd071cc0bb28905b46b24aa8c9c50aeb" gracePeriod=30 Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.173331 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.223449 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-59b9656b65-wz49g"] Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.225809 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.235948 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxw8m\" (UniqueName: \"kubernetes.io/projected/9acabca3-5194-4034-9a64-ccdf44c42e4e-kube-api-access-rxw8m\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.236007 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-config-data\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.236048 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.236083 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9acabca3-5194-4034-9a64-ccdf44c42e4e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.236129 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.236150 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-scripts\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.268676 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59b9656b65-wz49g"] Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.343833 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-dns-swift-storage-0\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.343943 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.343998 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-scripts\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.344084 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-config\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.344127 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmmct\" (UniqueName: \"kubernetes.io/projected/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-kube-api-access-hmmct\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.344160 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-ovsdbserver-sb\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.344283 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxw8m\" (UniqueName: \"kubernetes.io/projected/9acabca3-5194-4034-9a64-ccdf44c42e4e-kube-api-access-rxw8m\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.344389 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-config-data\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.344536 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-dns-svc\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.344575 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.344703 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9acabca3-5194-4034-9a64-ccdf44c42e4e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.344731 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-ovsdbserver-nb\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.344951 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9acabca3-5194-4034-9a64-ccdf44c42e4e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.359701 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-scripts\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.360940 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-config-data\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.362657 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.364047 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxw8m\" (UniqueName: \"kubernetes.io/projected/9acabca3-5194-4034-9a64-ccdf44c42e4e-kube-api-access-rxw8m\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.367896 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.390382 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.392213 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.398115 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.464678 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.465704 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-dns-swift-storage-0\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.465874 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-config\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.465917 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmmct\" (UniqueName: \"kubernetes.io/projected/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-kube-api-access-hmmct\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.465950 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-ovsdbserver-sb\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.468194 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-dns-swift-storage-0\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.469293 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-config\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.480378 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.481990 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-ovsdbserver-sb\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.482944 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-dns-svc\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.483124 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-ovsdbserver-nb\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.484142 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-ovsdbserver-nb\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.484869 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-dns-svc\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.501903 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmmct\" (UniqueName: \"kubernetes.io/projected/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-kube-api-access-hmmct\") pod \"dnsmasq-dns-59b9656b65-wz49g\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.549884 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.587034 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-config-data\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.587091 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-scripts\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.587183 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snmvf\" (UniqueName: \"kubernetes.io/projected/8486f280-519e-4f74-b023-b7bee7ca6053-kube-api-access-snmvf\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.587249 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8486f280-519e-4f74-b023-b7bee7ca6053-logs\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.587309 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.587351 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8486f280-519e-4f74-b023-b7bee7ca6053-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.588067 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-config-data-custom\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.693319 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-config-data-custom\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.693454 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-config-data\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.693487 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-scripts\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.693571 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snmvf\" (UniqueName: \"kubernetes.io/projected/8486f280-519e-4f74-b023-b7bee7ca6053-kube-api-access-snmvf\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.701017 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8486f280-519e-4f74-b023-b7bee7ca6053-logs\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.701236 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.701289 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8486f280-519e-4f74-b023-b7bee7ca6053-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.701571 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8486f280-519e-4f74-b023-b7bee7ca6053-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.702136 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8486f280-519e-4f74-b023-b7bee7ca6053-logs\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.702701 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-config-data-custom\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.702846 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-config-data\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.705485 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.706759 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-scripts\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.724376 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snmvf\" (UniqueName: \"kubernetes.io/projected/8486f280-519e-4f74-b023-b7bee7ca6053-kube-api-access-snmvf\") pod \"cinder-api-0\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.752203 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.877934 5014 generic.go:334] "Generic (PLEG): container finished" podID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerID="c7ce71fae308be7a43355aa94b0d535ffb0ce183d6608339015ec6523d282785" exitCode=0 Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.878002 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8a58cf95-3f4c-4369-acbf-117df5f667be","Type":"ContainerDied","Data":"c7ce71fae308be7a43355aa94b0d535ffb0ce183d6608339015ec6523d282785"} Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.881848 5014 generic.go:334] "Generic (PLEG): container finished" podID="34d354c8-b18e-458f-b050-1e3fa676c220" containerID="89f411c4c01f370fbfb681453ae9467dc67b254e4dc44d28603dca0ea82a6281" exitCode=143 Oct 06 21:49:51 crc kubenswrapper[5014]: I1006 21:49:51.882343 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-796d5f888-r5f9l" event={"ID":"34d354c8-b18e-458f-b050-1e3fa676c220","Type":"ContainerDied","Data":"89f411c4c01f370fbfb681453ae9467dc67b254e4dc44d28603dca0ea82a6281"} Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.096468 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59b9656b65-wz49g"] Oct 06 21:49:52 crc kubenswrapper[5014]: W1006 21:49:52.104568 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod92468d0e_94cb_4ca5_ad93_f8d6e7b9e2a8.slice/crio-8aa827338ce58ab0b3b7b862bc4a3dd8dc38fb2af9c230977528ab0ca3bb22f6 WatchSource:0}: Error finding container 8aa827338ce58ab0b3b7b862bc4a3dd8dc38fb2af9c230977528ab0ca3bb22f6: Status 404 returned error can't find the container with id 8aa827338ce58ab0b3b7b862bc4a3dd8dc38fb2af9c230977528ab0ca3bb22f6 Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.182387 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.216341 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.220875 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-sg-core-conf-yaml\") pod \"8a58cf95-3f4c-4369-acbf-117df5f667be\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.221222 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-config-data\") pod \"8a58cf95-3f4c-4369-acbf-117df5f667be\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.221251 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzhx4\" (UniqueName: \"kubernetes.io/projected/8a58cf95-3f4c-4369-acbf-117df5f667be-kube-api-access-lzhx4\") pod \"8a58cf95-3f4c-4369-acbf-117df5f667be\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.221285 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-scripts\") pod \"8a58cf95-3f4c-4369-acbf-117df5f667be\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.221334 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-combined-ca-bundle\") pod \"8a58cf95-3f4c-4369-acbf-117df5f667be\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.221363 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8a58cf95-3f4c-4369-acbf-117df5f667be-log-httpd\") pod \"8a58cf95-3f4c-4369-acbf-117df5f667be\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.221398 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8a58cf95-3f4c-4369-acbf-117df5f667be-run-httpd\") pod \"8a58cf95-3f4c-4369-acbf-117df5f667be\" (UID: \"8a58cf95-3f4c-4369-acbf-117df5f667be\") " Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.222069 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a58cf95-3f4c-4369-acbf-117df5f667be-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8a58cf95-3f4c-4369-acbf-117df5f667be" (UID: "8a58cf95-3f4c-4369-acbf-117df5f667be"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.223302 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a58cf95-3f4c-4369-acbf-117df5f667be-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8a58cf95-3f4c-4369-acbf-117df5f667be" (UID: "8a58cf95-3f4c-4369-acbf-117df5f667be"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.227197 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-scripts" (OuterVolumeSpecName: "scripts") pod "8a58cf95-3f4c-4369-acbf-117df5f667be" (UID: "8a58cf95-3f4c-4369-acbf-117df5f667be"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.229105 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a58cf95-3f4c-4369-acbf-117df5f667be-kube-api-access-lzhx4" (OuterVolumeSpecName: "kube-api-access-lzhx4") pod "8a58cf95-3f4c-4369-acbf-117df5f667be" (UID: "8a58cf95-3f4c-4369-acbf-117df5f667be"). InnerVolumeSpecName "kube-api-access-lzhx4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.324504 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzhx4\" (UniqueName: \"kubernetes.io/projected/8a58cf95-3f4c-4369-acbf-117df5f667be-kube-api-access-lzhx4\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.324755 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.324812 5014 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8a58cf95-3f4c-4369-acbf-117df5f667be-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.324879 5014 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8a58cf95-3f4c-4369-acbf-117df5f667be-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.344900 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.351709 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8a58cf95-3f4c-4369-acbf-117df5f667be" (UID: "8a58cf95-3f4c-4369-acbf-117df5f667be"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.363137 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8a58cf95-3f4c-4369-acbf-117df5f667be" (UID: "8a58cf95-3f4c-4369-acbf-117df5f667be"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.375536 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-config-data" (OuterVolumeSpecName: "config-data") pod "8a58cf95-3f4c-4369-acbf-117df5f667be" (UID: "8a58cf95-3f4c-4369-acbf-117df5f667be"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.426219 5014 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.426253 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.426263 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a58cf95-3f4c-4369-acbf-117df5f667be-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.900032 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8486f280-519e-4f74-b023-b7bee7ca6053","Type":"ContainerStarted","Data":"6847221a3243a36c5e3665480257b9186b0859f3785fae2ca41d2fa4f5281df5"} Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.902672 5014 generic.go:334] "Generic (PLEG): container finished" podID="92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8" containerID="105d7977d75a49d72314a402e90a068ea4d2d435cc3f786e02d4fc6e0dbe59c6" exitCode=0 Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.902720 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59b9656b65-wz49g" event={"ID":"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8","Type":"ContainerDied","Data":"105d7977d75a49d72314a402e90a068ea4d2d435cc3f786e02d4fc6e0dbe59c6"} Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.902738 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59b9656b65-wz49g" event={"ID":"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8","Type":"ContainerStarted","Data":"8aa827338ce58ab0b3b7b862bc4a3dd8dc38fb2af9c230977528ab0ca3bb22f6"} Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.915318 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8a58cf95-3f4c-4369-acbf-117df5f667be","Type":"ContainerDied","Data":"8a9f99650810fcd3755a788e5103334008d31df66fe0d09658a43767f2c3c0d7"} Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.915506 5014 scope.go:117] "RemoveContainer" containerID="aa292c89c90af0f3ff521929d202a34ac8faf0d61d557bec907a3dbadde539fa" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.915370 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:49:52 crc kubenswrapper[5014]: I1006 21:49:52.917071 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9acabca3-5194-4034-9a64-ccdf44c42e4e","Type":"ContainerStarted","Data":"1b15a617ba82e5830c1dafc78313d26f4d11c63e0363abccfc410aa665373684"} Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.054102 5014 scope.go:117] "RemoveContainer" containerID="024baa570e337a154f170236a97d79d9c8c1a615149a09636a6a026c16bece73" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.121667 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.150302 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.162766 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:49:53 crc kubenswrapper[5014]: E1006 21:49:53.163974 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="ceilometer-central-agent" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.163994 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="ceilometer-central-agent" Oct 06 21:49:53 crc kubenswrapper[5014]: E1006 21:49:53.164044 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="sg-core" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.164052 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="sg-core" Oct 06 21:49:53 crc kubenswrapper[5014]: E1006 21:49:53.164192 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="proxy-httpd" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.164205 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="proxy-httpd" Oct 06 21:49:53 crc kubenswrapper[5014]: E1006 21:49:53.164236 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="ceilometer-notification-agent" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.164246 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="ceilometer-notification-agent" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.164484 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="sg-core" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.164509 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="ceilometer-notification-agent" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.164524 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="ceilometer-central-agent" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.164553 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" containerName="proxy-httpd" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.185950 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/ceilometer-0"] Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.186060 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.188020 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.188139 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.196123 5014 scope.go:117] "RemoveContainer" containerID="c7ce71fae308be7a43355aa94b0d535ffb0ce183d6608339015ec6523d282785" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.223477 5014 scope.go:117] "RemoveContainer" containerID="8704f70d1a64b2ccd55f5a03a5d7a1871a20054703ef04bead5ca8fd6eb3c37f" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.244355 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.244425 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a75612c-8bae-463f-ae97-d13f279e1a11-log-httpd\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.244477 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-scripts\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.244529 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cd2bt\" (UniqueName: \"kubernetes.io/projected/9a75612c-8bae-463f-ae97-d13f279e1a11-kube-api-access-cd2bt\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.244661 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a75612c-8bae-463f-ae97-d13f279e1a11-run-httpd\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.244729 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.244851 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-config-data\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 
21:49:53.350836 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-config-data\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.350927 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.350969 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a75612c-8bae-463f-ae97-d13f279e1a11-log-httpd\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.351003 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-scripts\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.351035 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cd2bt\" (UniqueName: \"kubernetes.io/projected/9a75612c-8bae-463f-ae97-d13f279e1a11-kube-api-access-cd2bt\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.351076 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a75612c-8bae-463f-ae97-d13f279e1a11-run-httpd\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.351124 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.351563 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a75612c-8bae-463f-ae97-d13f279e1a11-log-httpd\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.351680 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a75612c-8bae-463f-ae97-d13f279e1a11-run-httpd\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.354310 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.354875 5014 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-scripts\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.358661 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.360884 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-config-data\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.376372 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cd2bt\" (UniqueName: \"kubernetes.io/projected/9a75612c-8bae-463f-ae97-d13f279e1a11-kube-api-access-cd2bt\") pod \"ceilometer-0\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.501152 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a58cf95-3f4c-4369-acbf-117df5f667be" path="/var/lib/kubelet/pods/8a58cf95-3f4c-4369-acbf-117df5f667be/volumes" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.507783 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77dd5cf987-sjkjh" podUID="0a05bafc-b81d-4f0e-9d9f-2807c0586c9e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: i/o timeout" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.514144 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.860016 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.947996 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8486f280-519e-4f74-b023-b7bee7ca6053","Type":"ContainerStarted","Data":"ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba"} Oct 06 21:49:53 crc kubenswrapper[5014]: I1006 21:49:53.950569 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59b9656b65-wz49g" event={"ID":"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8","Type":"ContainerStarted","Data":"7941c65573392cc10a3090841b4d9629ca34fea0c3ee8c74bf69e96d3b8ee435"} Oct 06 21:49:54 crc kubenswrapper[5014]: I1006 21:49:54.008859 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-59b9656b65-wz49g" podStartSLOduration=3.008835102 podStartE2EDuration="3.008835102s" podCreationTimestamp="2025-10-06 21:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:53.973555293 +0000 UTC m=+1139.266592047" watchObservedRunningTime="2025-10-06 21:49:54.008835102 +0000 UTC m=+1139.301871856" Oct 06 21:49:54 crc kubenswrapper[5014]: I1006 21:49:54.087418 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:49:54 crc kubenswrapper[5014]: W1006 21:49:54.095937 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9a75612c_8bae_463f_ae97_d13f279e1a11.slice/crio-fff04932d5c45d5838c2e414dbc982e9868aefa0a2b95126b20d2dfb51a2b802 WatchSource:0}: Error finding container fff04932d5c45d5838c2e414dbc982e9868aefa0a2b95126b20d2dfb51a2b802: Status 404 returned error can't find the container with id fff04932d5c45d5838c2e414dbc982e9868aefa0a2b95126b20d2dfb51a2b802 Oct 06 21:49:54 crc kubenswrapper[5014]: I1006 21:49:54.658638 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-796d5f888-r5f9l" podUID="34d354c8-b18e-458f-b050-1e3fa676c220" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": read tcp 10.217.0.2:40242->10.217.0.157:9311: read: connection reset by peer" Oct 06 21:49:54 crc kubenswrapper[5014]: I1006 21:49:54.659438 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-796d5f888-r5f9l" podUID="34d354c8-b18e-458f-b050-1e3fa676c220" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.157:9311/healthcheck\": read tcp 10.217.0.2:40246->10.217.0.157:9311: read: connection reset by peer" Oct 06 21:49:54 crc kubenswrapper[5014]: I1006 21:49:54.975630 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8486f280-519e-4f74-b023-b7bee7ca6053","Type":"ContainerStarted","Data":"ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b"} Oct 06 21:49:54 crc kubenswrapper[5014]: I1006 21:49:54.976389 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="8486f280-519e-4f74-b023-b7bee7ca6053" containerName="cinder-api-log" containerID="cri-o://ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba" gracePeriod=30 Oct 06 21:49:54 crc kubenswrapper[5014]: 
I1006 21:49:54.976915 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Oct 06 21:49:54 crc kubenswrapper[5014]: I1006 21:49:54.977321 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="8486f280-519e-4f74-b023-b7bee7ca6053" containerName="cinder-api" containerID="cri-o://ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b" gracePeriod=30 Oct 06 21:49:54 crc kubenswrapper[5014]: I1006 21:49:54.986984 5014 generic.go:334] "Generic (PLEG): container finished" podID="34d354c8-b18e-458f-b050-1e3fa676c220" containerID="a46af4adcc218820a252fabecc73325edd071cc0bb28905b46b24aa8c9c50aeb" exitCode=0 Oct 06 21:49:54 crc kubenswrapper[5014]: I1006 21:49:54.987385 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-796d5f888-r5f9l" event={"ID":"34d354c8-b18e-458f-b050-1e3fa676c220","Type":"ContainerDied","Data":"a46af4adcc218820a252fabecc73325edd071cc0bb28905b46b24aa8c9c50aeb"} Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.002046 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.002009651 podStartE2EDuration="4.002009651s" podCreationTimestamp="2025-10-06 21:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:54.998880282 +0000 UTC m=+1140.291917016" watchObservedRunningTime="2025-10-06 21:49:55.002009651 +0000 UTC m=+1140.295046385" Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.021395 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a75612c-8bae-463f-ae97-d13f279e1a11","Type":"ContainerStarted","Data":"fff04932d5c45d5838c2e414dbc982e9868aefa0a2b95126b20d2dfb51a2b802"} Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.033062 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9acabca3-5194-4034-9a64-ccdf44c42e4e","Type":"ContainerStarted","Data":"84135e3359a0b6072e0bb01add25d9584e8f933885fe05e96fb19ace6a59fbf8"} Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.033129 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.104905 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.207129 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-config-data\") pod \"34d354c8-b18e-458f-b050-1e3fa676c220\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.207325 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-config-data-custom\") pod \"34d354c8-b18e-458f-b050-1e3fa676c220\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.207379 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qhldc\" (UniqueName: \"kubernetes.io/projected/34d354c8-b18e-458f-b050-1e3fa676c220-kube-api-access-qhldc\") pod \"34d354c8-b18e-458f-b050-1e3fa676c220\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.207534 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34d354c8-b18e-458f-b050-1e3fa676c220-logs\") pod \"34d354c8-b18e-458f-b050-1e3fa676c220\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.207575 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-combined-ca-bundle\") pod \"34d354c8-b18e-458f-b050-1e3fa676c220\" (UID: \"34d354c8-b18e-458f-b050-1e3fa676c220\") " Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.210470 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/34d354c8-b18e-458f-b050-1e3fa676c220-logs" (OuterVolumeSpecName: "logs") pod "34d354c8-b18e-458f-b050-1e3fa676c220" (UID: "34d354c8-b18e-458f-b050-1e3fa676c220"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.219981 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34d354c8-b18e-458f-b050-1e3fa676c220-kube-api-access-qhldc" (OuterVolumeSpecName: "kube-api-access-qhldc") pod "34d354c8-b18e-458f-b050-1e3fa676c220" (UID: "34d354c8-b18e-458f-b050-1e3fa676c220"). InnerVolumeSpecName "kube-api-access-qhldc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.223022 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "34d354c8-b18e-458f-b050-1e3fa676c220" (UID: "34d354c8-b18e-458f-b050-1e3fa676c220"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.259728 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "34d354c8-b18e-458f-b050-1e3fa676c220" (UID: "34d354c8-b18e-458f-b050-1e3fa676c220"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.280896 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-config-data" (OuterVolumeSpecName: "config-data") pod "34d354c8-b18e-458f-b050-1e3fa676c220" (UID: "34d354c8-b18e-458f-b050-1e3fa676c220"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.309553 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/34d354c8-b18e-458f-b050-1e3fa676c220-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.309590 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.309603 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.309612 5014 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/34d354c8-b18e-458f-b050-1e3fa676c220-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:55 crc kubenswrapper[5014]: I1006 21:49:55.309635 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qhldc\" (UniqueName: \"kubernetes.io/projected/34d354c8-b18e-458f-b050-1e3fa676c220-kube-api-access-qhldc\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.025702 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.067875 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-796d5f888-r5f9l" event={"ID":"34d354c8-b18e-458f-b050-1e3fa676c220","Type":"ContainerDied","Data":"975e2caae16a8b8a55c6e92a7e9e1c08dbba34b9215e51beee95bae7cca77bc5"} Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.067929 5014 scope.go:117] "RemoveContainer" containerID="a46af4adcc218820a252fabecc73325edd071cc0bb28905b46b24aa8c9c50aeb" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.068197 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-796d5f888-r5f9l" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.086938 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a75612c-8bae-463f-ae97-d13f279e1a11","Type":"ContainerStarted","Data":"ac96bb1f7ab1dbde49cf028d8c34fc0038e016ea162c03e99aa73384f854cb90"} Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.102359 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-796d5f888-r5f9l"] Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.104001 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9acabca3-5194-4034-9a64-ccdf44c42e4e","Type":"ContainerStarted","Data":"efb225af13b0ea571df10f4df459f4bfd32ac797a4f60d85d9df5b7b0f6ef2ec"} Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.113842 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-796d5f888-r5f9l"] Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.116108 5014 scope.go:117] "RemoveContainer" containerID="89f411c4c01f370fbfb681453ae9467dc67b254e4dc44d28603dca0ea82a6281" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.116454 5014 generic.go:334] "Generic (PLEG): container finished" podID="8486f280-519e-4f74-b023-b7bee7ca6053" containerID="ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b" exitCode=0 Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.116485 5014 generic.go:334] "Generic (PLEG): container finished" podID="8486f280-519e-4f74-b023-b7bee7ca6053" containerID="ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba" exitCode=143 Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.116894 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8486f280-519e-4f74-b023-b7bee7ca6053","Type":"ContainerDied","Data":"ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b"} Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.116984 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8486f280-519e-4f74-b023-b7bee7ca6053","Type":"ContainerDied","Data":"ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba"} Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.116999 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8486f280-519e-4f74-b023-b7bee7ca6053","Type":"ContainerDied","Data":"6847221a3243a36c5e3665480257b9186b0859f3785fae2ca41d2fa4f5281df5"} Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.117170 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.132842 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.943123333 podStartE2EDuration="5.132821196s" podCreationTimestamp="2025-10-06 21:49:51 +0000 UTC" firstStartedPulling="2025-10-06 21:49:52.223578095 +0000 UTC m=+1137.516614829" lastFinishedPulling="2025-10-06 21:49:53.413275958 +0000 UTC m=+1138.706312692" observedRunningTime="2025-10-06 21:49:56.128021524 +0000 UTC m=+1141.421058258" watchObservedRunningTime="2025-10-06 21:49:56.132821196 +0000 UTC m=+1141.425858020" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.142803 5014 scope.go:117] "RemoveContainer" containerID="ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.144278 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-config-data-custom\") pod \"8486f280-519e-4f74-b023-b7bee7ca6053\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.144370 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-combined-ca-bundle\") pod \"8486f280-519e-4f74-b023-b7bee7ca6053\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.144416 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8486f280-519e-4f74-b023-b7bee7ca6053-logs\") pod \"8486f280-519e-4f74-b023-b7bee7ca6053\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.144522 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-config-data\") pod \"8486f280-519e-4f74-b023-b7bee7ca6053\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.144555 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8486f280-519e-4f74-b023-b7bee7ca6053-etc-machine-id\") pod \"8486f280-519e-4f74-b023-b7bee7ca6053\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.144613 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snmvf\" (UniqueName: \"kubernetes.io/projected/8486f280-519e-4f74-b023-b7bee7ca6053-kube-api-access-snmvf\") pod \"8486f280-519e-4f74-b023-b7bee7ca6053\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.144645 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-scripts\") pod \"8486f280-519e-4f74-b023-b7bee7ca6053\" (UID: \"8486f280-519e-4f74-b023-b7bee7ca6053\") " Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.144767 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8486f280-519e-4f74-b023-b7bee7ca6053-etc-machine-id" (OuterVolumeSpecName: 
"etc-machine-id") pod "8486f280-519e-4f74-b023-b7bee7ca6053" (UID: "8486f280-519e-4f74-b023-b7bee7ca6053"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.145503 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8486f280-519e-4f74-b023-b7bee7ca6053-logs" (OuterVolumeSpecName: "logs") pod "8486f280-519e-4f74-b023-b7bee7ca6053" (UID: "8486f280-519e-4f74-b023-b7bee7ca6053"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.150346 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8486f280-519e-4f74-b023-b7bee7ca6053-kube-api-access-snmvf" (OuterVolumeSpecName: "kube-api-access-snmvf") pod "8486f280-519e-4f74-b023-b7bee7ca6053" (UID: "8486f280-519e-4f74-b023-b7bee7ca6053"). InnerVolumeSpecName "kube-api-access-snmvf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.151062 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8486f280-519e-4f74-b023-b7bee7ca6053" (UID: "8486f280-519e-4f74-b023-b7bee7ca6053"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.182777 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-scripts" (OuterVolumeSpecName: "scripts") pod "8486f280-519e-4f74-b023-b7bee7ca6053" (UID: "8486f280-519e-4f74-b023-b7bee7ca6053"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.222794 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8486f280-519e-4f74-b023-b7bee7ca6053" (UID: "8486f280-519e-4f74-b023-b7bee7ca6053"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.233600 5014 scope.go:117] "RemoveContainer" containerID="ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.238636 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-config-data" (OuterVolumeSpecName: "config-data") pod "8486f280-519e-4f74-b023-b7bee7ca6053" (UID: "8486f280-519e-4f74-b023-b7bee7ca6053"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.246709 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snmvf\" (UniqueName: \"kubernetes.io/projected/8486f280-519e-4f74-b023-b7bee7ca6053-kube-api-access-snmvf\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.246736 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.246746 5014 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.246753 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.246761 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8486f280-519e-4f74-b023-b7bee7ca6053-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.246769 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8486f280-519e-4f74-b023-b7bee7ca6053-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.246778 5014 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8486f280-519e-4f74-b023-b7bee7ca6053-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.270777 5014 scope.go:117] "RemoveContainer" containerID="ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b" Oct 06 21:49:56 crc kubenswrapper[5014]: E1006 21:49:56.271233 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b\": container with ID starting with ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b not found: ID does not exist" containerID="ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.271259 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b"} err="failed to get container status \"ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b\": rpc error: code = NotFound desc = could not find container \"ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b\": container with ID starting with ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b not found: ID does not exist" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.271279 5014 scope.go:117] "RemoveContainer" containerID="ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba" Oct 06 21:49:56 crc kubenswrapper[5014]: E1006 21:49:56.271583 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba\": container with ID starting with ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba not found: ID does not exist" containerID="ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.271602 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba"} err="failed to get container status \"ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba\": rpc error: code = NotFound desc = could not find container \"ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba\": container with ID starting with ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba not found: ID does not exist" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.271613 5014 scope.go:117] "RemoveContainer" containerID="ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.271744 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b"} err="failed to get container status \"ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b\": rpc error: code = NotFound desc = could not find container \"ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b\": container with ID starting with ea0f5d475eb0dc1f566def081de345dceeabc3901c1ed12e19730923bb34d60b not found: ID does not exist" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.271762 5014 scope.go:117] "RemoveContainer" containerID="ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.271878 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba"} err="failed to get container status \"ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba\": rpc error: code = NotFound desc = could not find container \"ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba\": container with ID starting with ce6fb1204a860873d65b66e9d9aa917c7bdd9bca643f06b841482e3a9248caba not found: ID does not exist" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.482991 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.504811 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.519887 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.559885 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Oct 06 21:49:56 crc kubenswrapper[5014]: E1006 21:49:56.560391 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8486f280-519e-4f74-b023-b7bee7ca6053" containerName="cinder-api-log" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.560410 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8486f280-519e-4f74-b023-b7bee7ca6053" containerName="cinder-api-log" Oct 06 21:49:56 crc kubenswrapper[5014]: E1006 21:49:56.560425 5014 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="8486f280-519e-4f74-b023-b7bee7ca6053" containerName="cinder-api" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.560434 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8486f280-519e-4f74-b023-b7bee7ca6053" containerName="cinder-api" Oct 06 21:49:56 crc kubenswrapper[5014]: E1006 21:49:56.560455 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34d354c8-b18e-458f-b050-1e3fa676c220" containerName="barbican-api-log" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.560463 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="34d354c8-b18e-458f-b050-1e3fa676c220" containerName="barbican-api-log" Oct 06 21:49:56 crc kubenswrapper[5014]: E1006 21:49:56.560502 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34d354c8-b18e-458f-b050-1e3fa676c220" containerName="barbican-api" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.560510 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="34d354c8-b18e-458f-b050-1e3fa676c220" containerName="barbican-api" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.560775 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="8486f280-519e-4f74-b023-b7bee7ca6053" containerName="cinder-api" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.560793 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="8486f280-519e-4f74-b023-b7bee7ca6053" containerName="cinder-api-log" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.560817 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="34d354c8-b18e-458f-b050-1e3fa676c220" containerName="barbican-api" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.560835 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="34d354c8-b18e-458f-b050-1e3fa676c220" containerName="barbican-api-log" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.562031 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.567823 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.567851 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.567988 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.574690 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.656466 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/06e8dc30-8c95-4585-82e0-fc82de286a1c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.656508 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-config-data\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.656542 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.656771 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsf6r\" (UniqueName: \"kubernetes.io/projected/06e8dc30-8c95-4585-82e0-fc82de286a1c-kube-api-access-wsf6r\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.656832 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-config-data-custom\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.657230 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-scripts\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.657489 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.657523 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/06e8dc30-8c95-4585-82e0-fc82de286a1c-logs\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.657586 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.759776 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-scripts\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.760271 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.760301 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/06e8dc30-8c95-4585-82e0-fc82de286a1c-logs\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.760833 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/06e8dc30-8c95-4585-82e0-fc82de286a1c-logs\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.760336 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.760988 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/06e8dc30-8c95-4585-82e0-fc82de286a1c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.761076 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/06e8dc30-8c95-4585-82e0-fc82de286a1c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.761018 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-config-data\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.761510 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.761727 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsf6r\" (UniqueName: \"kubernetes.io/projected/06e8dc30-8c95-4585-82e0-fc82de286a1c-kube-api-access-wsf6r\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.761774 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-config-data-custom\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.773383 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.773465 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-scripts\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.773959 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.775706 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-config-data-custom\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.776864 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-config-data\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.779282 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.800224 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsf6r\" (UniqueName: \"kubernetes.io/projected/06e8dc30-8c95-4585-82e0-fc82de286a1c-kube-api-access-wsf6r\") pod \"cinder-api-0\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " pod="openstack/cinder-api-0" Oct 06 21:49:56 crc kubenswrapper[5014]: I1006 21:49:56.882440 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Oct 06 21:49:57 crc kubenswrapper[5014]: I1006 21:49:57.137200 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a75612c-8bae-463f-ae97-d13f279e1a11","Type":"ContainerStarted","Data":"5ee57a4b6ef5208e43f440adcdbc9d68a3da1df52462af2d1f59430815cc4a88"} Oct 06 21:49:57 crc kubenswrapper[5014]: I1006 21:49:57.383372 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 06 21:49:57 crc kubenswrapper[5014]: I1006 21:49:57.496185 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34d354c8-b18e-458f-b050-1e3fa676c220" path="/var/lib/kubelet/pods/34d354c8-b18e-458f-b050-1e3fa676c220/volumes" Oct 06 21:49:57 crc kubenswrapper[5014]: I1006 21:49:57.497350 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8486f280-519e-4f74-b023-b7bee7ca6053" path="/var/lib/kubelet/pods/8486f280-519e-4f74-b023-b7bee7ca6053/volumes" Oct 06 21:49:58 crc kubenswrapper[5014]: I1006 21:49:58.170395 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a75612c-8bae-463f-ae97-d13f279e1a11","Type":"ContainerStarted","Data":"fc8bba0b3b8dfbe3ea4e1140b7efac351af34ed8e1fbf0c92da877bb65ad2df9"} Oct 06 21:49:58 crc kubenswrapper[5014]: I1006 21:49:58.175040 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"06e8dc30-8c95-4585-82e0-fc82de286a1c","Type":"ContainerStarted","Data":"217fe11fe290a286d9693b3ce94387c3c5d799112f056294ef17d3e38a043e2a"} Oct 06 21:49:58 crc kubenswrapper[5014]: I1006 21:49:58.175278 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"06e8dc30-8c95-4585-82e0-fc82de286a1c","Type":"ContainerStarted","Data":"4a593f9dd7435ec0b93f8a4db7e1f1773704e5187c941dab328f3ec8a3edbcc2"} Oct 06 21:49:59 crc kubenswrapper[5014]: I1006 21:49:59.188882 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"06e8dc30-8c95-4585-82e0-fc82de286a1c","Type":"ContainerStarted","Data":"a4c0aa6211dfec0a5eda7b347326298f80376112ca717764da53c00f048df69b"} Oct 06 21:49:59 crc kubenswrapper[5014]: I1006 21:49:59.189460 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Oct 06 21:49:59 crc kubenswrapper[5014]: I1006 21:49:59.220691 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.220660639 podStartE2EDuration="3.220660639s" podCreationTimestamp="2025-10-06 21:49:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:49:59.219719719 +0000 UTC m=+1144.512756493" watchObservedRunningTime="2025-10-06 21:49:59.220660639 +0000 UTC m=+1144.513697423" Oct 06 21:50:00 crc kubenswrapper[5014]: I1006 21:50:00.202114 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a75612c-8bae-463f-ae97-d13f279e1a11","Type":"ContainerStarted","Data":"568911e5918392d69ef9b9424f083ece0a988d7f9a8eea2186dfbec890045616"} Oct 06 21:50:00 crc kubenswrapper[5014]: I1006 21:50:00.202498 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 06 21:50:00 crc kubenswrapper[5014]: I1006 21:50:00.242230 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" 
podStartSLOduration=2.451777821 podStartE2EDuration="7.242202878s" podCreationTimestamp="2025-10-06 21:49:53 +0000 UTC" firstStartedPulling="2025-10-06 21:49:54.100740908 +0000 UTC m=+1139.393777632" lastFinishedPulling="2025-10-06 21:49:58.891165925 +0000 UTC m=+1144.184202689" observedRunningTime="2025-10-06 21:50:00.233761269 +0000 UTC m=+1145.526798003" watchObservedRunningTime="2025-10-06 21:50:00.242202878 +0000 UTC m=+1145.535239612" Oct 06 21:50:01 crc kubenswrapper[5014]: I1006 21:50:01.552931 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:50:01 crc kubenswrapper[5014]: I1006 21:50:01.652604 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cddb74997-nnd26"] Oct 06 21:50:01 crc kubenswrapper[5014]: I1006 21:50:01.653010 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cddb74997-nnd26" podUID="8248bd19-6009-4b21-9c52-1d016e1bfef2" containerName="dnsmasq-dns" containerID="cri-o://27eb46dd9464b7adab1e8a72a4c5823879975f08d71d6d76de893edca8a709d9" gracePeriod=10 Oct 06 21:50:01 crc kubenswrapper[5014]: I1006 21:50:01.763761 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Oct 06 21:50:01 crc kubenswrapper[5014]: I1006 21:50:01.828817 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.220456 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.222604 5014 generic.go:334] "Generic (PLEG): container finished" podID="8248bd19-6009-4b21-9c52-1d016e1bfef2" containerID="27eb46dd9464b7adab1e8a72a4c5823879975f08d71d6d76de893edca8a709d9" exitCode=0 Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.222721 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cddb74997-nnd26" event={"ID":"8248bd19-6009-4b21-9c52-1d016e1bfef2","Type":"ContainerDied","Data":"27eb46dd9464b7adab1e8a72a4c5823879975f08d71d6d76de893edca8a709d9"} Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.222803 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cddb74997-nnd26" event={"ID":"8248bd19-6009-4b21-9c52-1d016e1bfef2","Type":"ContainerDied","Data":"d5ac279e7611d1bedaddb17c1db532fd75f28e76675cf5ef7d66092ed4770c6d"} Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.222838 5014 scope.go:117] "RemoveContainer" containerID="27eb46dd9464b7adab1e8a72a4c5823879975f08d71d6d76de893edca8a709d9" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.223144 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="9acabca3-5194-4034-9a64-ccdf44c42e4e" containerName="probe" containerID="cri-o://efb225af13b0ea571df10f4df459f4bfd32ac797a4f60d85d9df5b7b0f6ef2ec" gracePeriod=30 Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.223074 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="9acabca3-5194-4034-9a64-ccdf44c42e4e" containerName="cinder-scheduler" containerID="cri-o://84135e3359a0b6072e0bb01add25d9584e8f933885fe05e96fb19ace6a59fbf8" gracePeriod=30 Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.251746 5014 scope.go:117] "RemoveContainer" 
containerID="a024f53b70fe1b3c5654b36136f202ab253bdd2be9ca0595375efe50d2b2a3db" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.273763 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-config\") pod \"8248bd19-6009-4b21-9c52-1d016e1bfef2\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.274362 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-ovsdbserver-nb\") pod \"8248bd19-6009-4b21-9c52-1d016e1bfef2\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.274519 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfcmq\" (UniqueName: \"kubernetes.io/projected/8248bd19-6009-4b21-9c52-1d016e1bfef2-kube-api-access-cfcmq\") pod \"8248bd19-6009-4b21-9c52-1d016e1bfef2\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.274713 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-dns-swift-storage-0\") pod \"8248bd19-6009-4b21-9c52-1d016e1bfef2\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.274765 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-ovsdbserver-sb\") pod \"8248bd19-6009-4b21-9c52-1d016e1bfef2\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.274914 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-dns-svc\") pod \"8248bd19-6009-4b21-9c52-1d016e1bfef2\" (UID: \"8248bd19-6009-4b21-9c52-1d016e1bfef2\") " Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.283937 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8248bd19-6009-4b21-9c52-1d016e1bfef2-kube-api-access-cfcmq" (OuterVolumeSpecName: "kube-api-access-cfcmq") pod "8248bd19-6009-4b21-9c52-1d016e1bfef2" (UID: "8248bd19-6009-4b21-9c52-1d016e1bfef2"). InnerVolumeSpecName "kube-api-access-cfcmq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.291817 5014 scope.go:117] "RemoveContainer" containerID="27eb46dd9464b7adab1e8a72a4c5823879975f08d71d6d76de893edca8a709d9" Oct 06 21:50:02 crc kubenswrapper[5014]: E1006 21:50:02.292365 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27eb46dd9464b7adab1e8a72a4c5823879975f08d71d6d76de893edca8a709d9\": container with ID starting with 27eb46dd9464b7adab1e8a72a4c5823879975f08d71d6d76de893edca8a709d9 not found: ID does not exist" containerID="27eb46dd9464b7adab1e8a72a4c5823879975f08d71d6d76de893edca8a709d9" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.292417 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27eb46dd9464b7adab1e8a72a4c5823879975f08d71d6d76de893edca8a709d9"} err="failed to get container status \"27eb46dd9464b7adab1e8a72a4c5823879975f08d71d6d76de893edca8a709d9\": rpc error: code = NotFound desc = could not find container \"27eb46dd9464b7adab1e8a72a4c5823879975f08d71d6d76de893edca8a709d9\": container with ID starting with 27eb46dd9464b7adab1e8a72a4c5823879975f08d71d6d76de893edca8a709d9 not found: ID does not exist" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.292447 5014 scope.go:117] "RemoveContainer" containerID="a024f53b70fe1b3c5654b36136f202ab253bdd2be9ca0595375efe50d2b2a3db" Oct 06 21:50:02 crc kubenswrapper[5014]: E1006 21:50:02.292769 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a024f53b70fe1b3c5654b36136f202ab253bdd2be9ca0595375efe50d2b2a3db\": container with ID starting with a024f53b70fe1b3c5654b36136f202ab253bdd2be9ca0595375efe50d2b2a3db not found: ID does not exist" containerID="a024f53b70fe1b3c5654b36136f202ab253bdd2be9ca0595375efe50d2b2a3db" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.292787 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a024f53b70fe1b3c5654b36136f202ab253bdd2be9ca0595375efe50d2b2a3db"} err="failed to get container status \"a024f53b70fe1b3c5654b36136f202ab253bdd2be9ca0595375efe50d2b2a3db\": rpc error: code = NotFound desc = could not find container \"a024f53b70fe1b3c5654b36136f202ab253bdd2be9ca0595375efe50d2b2a3db\": container with ID starting with a024f53b70fe1b3c5654b36136f202ab253bdd2be9ca0595375efe50d2b2a3db not found: ID does not exist" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.355897 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-config" (OuterVolumeSpecName: "config") pod "8248bd19-6009-4b21-9c52-1d016e1bfef2" (UID: "8248bd19-6009-4b21-9c52-1d016e1bfef2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.356221 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8248bd19-6009-4b21-9c52-1d016e1bfef2" (UID: "8248bd19-6009-4b21-9c52-1d016e1bfef2"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.361595 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8248bd19-6009-4b21-9c52-1d016e1bfef2" (UID: "8248bd19-6009-4b21-9c52-1d016e1bfef2"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.365062 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8248bd19-6009-4b21-9c52-1d016e1bfef2" (UID: "8248bd19-6009-4b21-9c52-1d016e1bfef2"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.365528 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8248bd19-6009-4b21-9c52-1d016e1bfef2" (UID: "8248bd19-6009-4b21-9c52-1d016e1bfef2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.377436 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfcmq\" (UniqueName: \"kubernetes.io/projected/8248bd19-6009-4b21-9c52-1d016e1bfef2-kube-api-access-cfcmq\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.377483 5014 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.377497 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.377510 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.377521 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:02 crc kubenswrapper[5014]: I1006 21:50:02.377531 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8248bd19-6009-4b21-9c52-1d016e1bfef2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:03 crc kubenswrapper[5014]: I1006 21:50:03.239168 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cddb74997-nnd26" Oct 06 21:50:03 crc kubenswrapper[5014]: I1006 21:50:03.245219 5014 generic.go:334] "Generic (PLEG): container finished" podID="9acabca3-5194-4034-9a64-ccdf44c42e4e" containerID="efb225af13b0ea571df10f4df459f4bfd32ac797a4f60d85d9df5b7b0f6ef2ec" exitCode=0 Oct 06 21:50:03 crc kubenswrapper[5014]: I1006 21:50:03.245286 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9acabca3-5194-4034-9a64-ccdf44c42e4e","Type":"ContainerDied","Data":"efb225af13b0ea571df10f4df459f4bfd32ac797a4f60d85d9df5b7b0f6ef2ec"} Oct 06 21:50:03 crc kubenswrapper[5014]: I1006 21:50:03.286281 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cddb74997-nnd26"] Oct 06 21:50:03 crc kubenswrapper[5014]: I1006 21:50:03.300017 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cddb74997-nnd26"] Oct 06 21:50:03 crc kubenswrapper[5014]: I1006 21:50:03.435286 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:50:03 crc kubenswrapper[5014]: I1006 21:50:03.511262 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8248bd19-6009-4b21-9c52-1d016e1bfef2" path="/var/lib/kubelet/pods/8248bd19-6009-4b21-9c52-1d016e1bfef2/volumes" Oct 06 21:50:04 crc kubenswrapper[5014]: I1006 21:50:04.386294 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:50:04 crc kubenswrapper[5014]: I1006 21:50:04.397715 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:50:05 crc kubenswrapper[5014]: I1006 21:50:05.203433 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.086894 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Oct 06 21:50:06 crc kubenswrapper[5014]: E1006 21:50:06.101034 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8248bd19-6009-4b21-9c52-1d016e1bfef2" containerName="init" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.101230 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8248bd19-6009-4b21-9c52-1d016e1bfef2" containerName="init" Oct 06 21:50:06 crc kubenswrapper[5014]: E1006 21:50:06.101298 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8248bd19-6009-4b21-9c52-1d016e1bfef2" containerName="dnsmasq-dns" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.101352 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8248bd19-6009-4b21-9c52-1d016e1bfef2" containerName="dnsmasq-dns" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.101599 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="8248bd19-6009-4b21-9c52-1d016e1bfef2" containerName="dnsmasq-dns" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.102188 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.102327 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.112010 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.112016 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.112064 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-rmrxw" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.150703 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a07ac22a-2156-4eb7-8301-fac1dd39bfad-openstack-config\") pod \"openstackclient\" (UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.150763 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a07ac22a-2156-4eb7-8301-fac1dd39bfad-combined-ca-bundle\") pod \"openstackclient\" (UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.150839 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a07ac22a-2156-4eb7-8301-fac1dd39bfad-openstack-config-secret\") pod \"openstackclient\" (UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.150954 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rpkv\" (UniqueName: \"kubernetes.io/projected/a07ac22a-2156-4eb7-8301-fac1dd39bfad-kube-api-access-6rpkv\") pod \"openstackclient\" (UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.253186 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a07ac22a-2156-4eb7-8301-fac1dd39bfad-openstack-config-secret\") pod \"openstackclient\" (UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.253309 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rpkv\" (UniqueName: \"kubernetes.io/projected/a07ac22a-2156-4eb7-8301-fac1dd39bfad-kube-api-access-6rpkv\") pod \"openstackclient\" (UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.253391 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a07ac22a-2156-4eb7-8301-fac1dd39bfad-openstack-config\") pod \"openstackclient\" (UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.253427 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a07ac22a-2156-4eb7-8301-fac1dd39bfad-combined-ca-bundle\") pod \"openstackclient\" 
(UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.255679 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a07ac22a-2156-4eb7-8301-fac1dd39bfad-openstack-config\") pod \"openstackclient\" (UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.261668 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a07ac22a-2156-4eb7-8301-fac1dd39bfad-combined-ca-bundle\") pod \"openstackclient\" (UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.263813 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a07ac22a-2156-4eb7-8301-fac1dd39bfad-openstack-config-secret\") pod \"openstackclient\" (UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.276900 5014 generic.go:334] "Generic (PLEG): container finished" podID="9acabca3-5194-4034-9a64-ccdf44c42e4e" containerID="84135e3359a0b6072e0bb01add25d9584e8f933885fe05e96fb19ace6a59fbf8" exitCode=0 Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.277153 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rpkv\" (UniqueName: \"kubernetes.io/projected/a07ac22a-2156-4eb7-8301-fac1dd39bfad-kube-api-access-6rpkv\") pod \"openstackclient\" (UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.277161 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9acabca3-5194-4034-9a64-ccdf44c42e4e","Type":"ContainerDied","Data":"84135e3359a0b6072e0bb01add25d9584e8f933885fe05e96fb19ace6a59fbf8"} Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.356554 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.357106 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.369436 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.393924 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.395454 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.402698 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.456527 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-combined-ca-bundle\") pod \"openstackclient\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.456669 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-openstack-config-secret\") pod \"openstackclient\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.456884 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-openstack-config\") pod \"openstackclient\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.456909 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpslr\" (UniqueName: \"kubernetes.io/projected/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-kube-api-access-gpslr\") pod \"openstackclient\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: E1006 21:50:06.525862 5014 log.go:32] "RunPodSandbox from runtime service failed" err=< Oct 06 21:50:06 crc kubenswrapper[5014]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_a07ac22a-2156-4eb7-8301-fac1dd39bfad_0(b4559743890c37c637db16bdba536f968c2bce9dfc8f5cad42c51c74ac2ecc49): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"b4559743890c37c637db16bdba536f968c2bce9dfc8f5cad42c51c74ac2ecc49" Netns:"/var/run/netns/d65e16f6-af40-4c43-b324-e07db04f63db" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=b4559743890c37c637db16bdba536f968c2bce9dfc8f5cad42c51c74ac2ecc49;K8S_POD_UID=a07ac22a-2156-4eb7-8301-fac1dd39bfad" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/a07ac22a-2156-4eb7-8301-fac1dd39bfad]: expected pod UID "a07ac22a-2156-4eb7-8301-fac1dd39bfad" but got "db1ef19c-c5bd-4bf1-a58c-73ff198caaa9" from Kube API Oct 06 21:50:06 crc kubenswrapper[5014]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Oct 06 21:50:06 crc kubenswrapper[5014]: > Oct 06 21:50:06 crc kubenswrapper[5014]: E1006 21:50:06.525939 5014 kuberuntime_sandbox.go:72] 
"Failed to create sandbox for pod" err=< Oct 06 21:50:06 crc kubenswrapper[5014]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_a07ac22a-2156-4eb7-8301-fac1dd39bfad_0(b4559743890c37c637db16bdba536f968c2bce9dfc8f5cad42c51c74ac2ecc49): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"b4559743890c37c637db16bdba536f968c2bce9dfc8f5cad42c51c74ac2ecc49" Netns:"/var/run/netns/d65e16f6-af40-4c43-b324-e07db04f63db" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=b4559743890c37c637db16bdba536f968c2bce9dfc8f5cad42c51c74ac2ecc49;K8S_POD_UID=a07ac22a-2156-4eb7-8301-fac1dd39bfad" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/a07ac22a-2156-4eb7-8301-fac1dd39bfad]: expected pod UID "a07ac22a-2156-4eb7-8301-fac1dd39bfad" but got "db1ef19c-c5bd-4bf1-a58c-73ff198caaa9" from Kube API Oct 06 21:50:06 crc kubenswrapper[5014]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Oct 06 21:50:06 crc kubenswrapper[5014]: > pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.558916 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-combined-ca-bundle\") pod \"openstackclient\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.559042 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-openstack-config-secret\") pod \"openstackclient\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.559075 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-openstack-config\") pod \"openstackclient\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.559099 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpslr\" (UniqueName: \"kubernetes.io/projected/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-kube-api-access-gpslr\") pod \"openstackclient\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.562628 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-openstack-config\") pod \"openstackclient\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.573315 5014 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-combined-ca-bundle\") pod \"openstackclient\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.573761 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-openstack-config-secret\") pod \"openstackclient\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.578287 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpslr\" (UniqueName: \"kubernetes.io/projected/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-kube-api-access-gpslr\") pod \"openstackclient\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.597770 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.659970 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-config-data\") pod \"9acabca3-5194-4034-9a64-ccdf44c42e4e\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.660111 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-config-data-custom\") pod \"9acabca3-5194-4034-9a64-ccdf44c42e4e\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.660145 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxw8m\" (UniqueName: \"kubernetes.io/projected/9acabca3-5194-4034-9a64-ccdf44c42e4e-kube-api-access-rxw8m\") pod \"9acabca3-5194-4034-9a64-ccdf44c42e4e\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.660176 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9acabca3-5194-4034-9a64-ccdf44c42e4e-etc-machine-id\") pod \"9acabca3-5194-4034-9a64-ccdf44c42e4e\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.660233 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-scripts\") pod \"9acabca3-5194-4034-9a64-ccdf44c42e4e\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.660332 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-combined-ca-bundle\") pod \"9acabca3-5194-4034-9a64-ccdf44c42e4e\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.661078 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9acabca3-5194-4034-9a64-ccdf44c42e4e-etc-machine-id" 
(OuterVolumeSpecName: "etc-machine-id") pod "9acabca3-5194-4034-9a64-ccdf44c42e4e" (UID: "9acabca3-5194-4034-9a64-ccdf44c42e4e"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.664885 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9acabca3-5194-4034-9a64-ccdf44c42e4e" (UID: "9acabca3-5194-4034-9a64-ccdf44c42e4e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.665578 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9acabca3-5194-4034-9a64-ccdf44c42e4e-kube-api-access-rxw8m" (OuterVolumeSpecName: "kube-api-access-rxw8m") pod "9acabca3-5194-4034-9a64-ccdf44c42e4e" (UID: "9acabca3-5194-4034-9a64-ccdf44c42e4e"). InnerVolumeSpecName "kube-api-access-rxw8m". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.670102 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-scripts" (OuterVolumeSpecName: "scripts") pod "9acabca3-5194-4034-9a64-ccdf44c42e4e" (UID: "9acabca3-5194-4034-9a64-ccdf44c42e4e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.721298 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-f6fff5c8f-xbgr9" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.724934 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9acabca3-5194-4034-9a64-ccdf44c42e4e" (UID: "9acabca3-5194-4034-9a64-ccdf44c42e4e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.765929 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-config-data" (OuterVolumeSpecName: "config-data") pod "9acabca3-5194-4034-9a64-ccdf44c42e4e" (UID: "9acabca3-5194-4034-9a64-ccdf44c42e4e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.773090 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-config-data\") pod \"9acabca3-5194-4034-9a64-ccdf44c42e4e\" (UID: \"9acabca3-5194-4034-9a64-ccdf44c42e4e\") " Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.773848 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.773914 5014 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.773969 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxw8m\" (UniqueName: \"kubernetes.io/projected/9acabca3-5194-4034-9a64-ccdf44c42e4e-kube-api-access-rxw8m\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.774039 5014 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9acabca3-5194-4034-9a64-ccdf44c42e4e-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.774091 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:06 crc kubenswrapper[5014]: W1006 21:50:06.774541 5014 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/9acabca3-5194-4034-9a64-ccdf44c42e4e/volumes/kubernetes.io~secret/config-data Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.774569 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-config-data" (OuterVolumeSpecName: "config-data") pod "9acabca3-5194-4034-9a64-ccdf44c42e4e" (UID: "9acabca3-5194-4034-9a64-ccdf44c42e4e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.796714 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-85f7fb587d-lk8cm"] Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.796950 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-85f7fb587d-lk8cm" podUID="5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" containerName="neutron-api" containerID="cri-o://361154f06b2adce70c209fd6d3ee1a7e31b251c0ff0e58387aeb8390a251434a" gracePeriod=30 Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.798685 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-85f7fb587d-lk8cm" podUID="5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" containerName="neutron-httpd" containerID="cri-o://0964dccde0b5c76aeb43c1e10947a261ea6a671b47c328ed331ba8d124f92931" gracePeriod=30 Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.819053 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Oct 06 21:50:06 crc kubenswrapper[5014]: I1006 21:50:06.875990 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9acabca3-5194-4034-9a64-ccdf44c42e4e-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.286337 5014 generic.go:334] "Generic (PLEG): container finished" podID="5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" containerID="0964dccde0b5c76aeb43c1e10947a261ea6a671b47c328ed331ba8d124f92931" exitCode=0 Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.286518 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85f7fb587d-lk8cm" event={"ID":"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d","Type":"ContainerDied","Data":"0964dccde0b5c76aeb43c1e10947a261ea6a671b47c328ed331ba8d124f92931"} Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.288593 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.289167 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.290027 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9acabca3-5194-4034-9a64-ccdf44c42e4e","Type":"ContainerDied","Data":"1b15a617ba82e5830c1dafc78313d26f4d11c63e0363abccfc410aa665373684"} Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.290115 5014 scope.go:117] "RemoveContainer" containerID="efb225af13b0ea571df10f4df459f4bfd32ac797a4f60d85d9df5b7b0f6ef2ec" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.297254 5014 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="a07ac22a-2156-4eb7-8301-fac1dd39bfad" podUID="db1ef19c-c5bd-4bf1-a58c-73ff198caaa9" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.323588 5014 scope.go:117] "RemoveContainer" containerID="84135e3359a0b6072e0bb01add25d9584e8f933885fe05e96fb19ace6a59fbf8" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.361895 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.411697 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.434074 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.480839 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.492814 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a07ac22a-2156-4eb7-8301-fac1dd39bfad-combined-ca-bundle\") pod \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\" (UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.492893 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a07ac22a-2156-4eb7-8301-fac1dd39bfad-openstack-config-secret\") pod \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\" (UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.492965 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a07ac22a-2156-4eb7-8301-fac1dd39bfad-openstack-config\") pod \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\" (UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.493074 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rpkv\" (UniqueName: \"kubernetes.io/projected/a07ac22a-2156-4eb7-8301-fac1dd39bfad-kube-api-access-6rpkv\") pod \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\" (UID: \"a07ac22a-2156-4eb7-8301-fac1dd39bfad\") " Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.495198 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a07ac22a-2156-4eb7-8301-fac1dd39bfad-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "a07ac22a-2156-4eb7-8301-fac1dd39bfad" (UID: "a07ac22a-2156-4eb7-8301-fac1dd39bfad"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.498712 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a07ac22a-2156-4eb7-8301-fac1dd39bfad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a07ac22a-2156-4eb7-8301-fac1dd39bfad" (UID: "a07ac22a-2156-4eb7-8301-fac1dd39bfad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.499497 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9acabca3-5194-4034-9a64-ccdf44c42e4e" path="/var/lib/kubelet/pods/9acabca3-5194-4034-9a64-ccdf44c42e4e/volumes" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.502781 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a07ac22a-2156-4eb7-8301-fac1dd39bfad-kube-api-access-6rpkv" (OuterVolumeSpecName: "kube-api-access-6rpkv") pod "a07ac22a-2156-4eb7-8301-fac1dd39bfad" (UID: "a07ac22a-2156-4eb7-8301-fac1dd39bfad"). InnerVolumeSpecName "kube-api-access-6rpkv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.516848 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a07ac22a-2156-4eb7-8301-fac1dd39bfad-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "a07ac22a-2156-4eb7-8301-fac1dd39bfad" (UID: "a07ac22a-2156-4eb7-8301-fac1dd39bfad"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.577006 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Oct 06 21:50:07 crc kubenswrapper[5014]: E1006 21:50:07.577585 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9acabca3-5194-4034-9a64-ccdf44c42e4e" containerName="probe" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.577601 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9acabca3-5194-4034-9a64-ccdf44c42e4e" containerName="probe" Oct 06 21:50:07 crc kubenswrapper[5014]: E1006 21:50:07.577625 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9acabca3-5194-4034-9a64-ccdf44c42e4e" containerName="cinder-scheduler" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.577632 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9acabca3-5194-4034-9a64-ccdf44c42e4e" containerName="cinder-scheduler" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.577817 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="9acabca3-5194-4034-9a64-ccdf44c42e4e" containerName="probe" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.577846 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="9acabca3-5194-4034-9a64-ccdf44c42e4e" containerName="cinder-scheduler" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.578734 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.578812 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.583338 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.595059 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rpkv\" (UniqueName: \"kubernetes.io/projected/a07ac22a-2156-4eb7-8301-fac1dd39bfad-kube-api-access-6rpkv\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.595102 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a07ac22a-2156-4eb7-8301-fac1dd39bfad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.595116 5014 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a07ac22a-2156-4eb7-8301-fac1dd39bfad-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.595128 5014 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a07ac22a-2156-4eb7-8301-fac1dd39bfad-openstack-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.696990 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mjqj\" (UniqueName: \"kubernetes.io/projected/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-kube-api-access-6mjqj\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.697060 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.697168 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.697225 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-config-data\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.697262 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-scripts\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.697283 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.799087 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.799176 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-config-data\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.799228 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-scripts\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.799251 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.799302 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mjqj\" (UniqueName: \"kubernetes.io/projected/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-kube-api-access-6mjqj\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.799358 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.799440 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.804231 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.804527 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-config-data\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.808032 5014 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-scripts\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.808161 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.822179 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mjqj\" (UniqueName: \"kubernetes.io/projected/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-kube-api-access-6mjqj\") pod \"cinder-scheduler-0\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " pod="openstack/cinder-scheduler-0" Oct 06 21:50:07 crc kubenswrapper[5014]: I1006 21:50:07.892120 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 06 21:50:08 crc kubenswrapper[5014]: I1006 21:50:08.300723 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9","Type":"ContainerStarted","Data":"9b79c8c180f837b860abd2173bc1ada44ea1b33b8375e75d2d5bd649386be9c1"} Oct 06 21:50:08 crc kubenswrapper[5014]: I1006 21:50:08.305445 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Oct 06 21:50:08 crc kubenswrapper[5014]: I1006 21:50:08.324131 5014 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="a07ac22a-2156-4eb7-8301-fac1dd39bfad" podUID="db1ef19c-c5bd-4bf1-a58c-73ff198caaa9" Oct 06 21:50:08 crc kubenswrapper[5014]: I1006 21:50:08.453958 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.036331 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.057008 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.128353 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-config\") pod \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.128443 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-ovndb-tls-certs\") pod \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.128550 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pl5ck\" (UniqueName: \"kubernetes.io/projected/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-kube-api-access-pl5ck\") pod \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.128690 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-combined-ca-bundle\") pod \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.129473 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-httpd-config\") pod \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\" (UID: \"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d\") " Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.144463 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" (UID: "5b81e360-666a-4498-b4a1-b2e2bb7f9a6d"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.163240 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-kube-api-access-pl5ck" (OuterVolumeSpecName: "kube-api-access-pl5ck") pod "5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" (UID: "5b81e360-666a-4498-b4a1-b2e2bb7f9a6d"). InnerVolumeSpecName "kube-api-access-pl5ck". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.189774 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-config" (OuterVolumeSpecName: "config") pod "5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" (UID: "5b81e360-666a-4498-b4a1-b2e2bb7f9a6d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.232938 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.232969 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pl5ck\" (UniqueName: \"kubernetes.io/projected/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-kube-api-access-pl5ck\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.232982 5014 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-httpd-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.234291 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" (UID: "5b81e360-666a-4498-b4a1-b2e2bb7f9a6d"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.240362 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" (UID: "5b81e360-666a-4498-b4a1-b2e2bb7f9a6d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.323742 5014 generic.go:334] "Generic (PLEG): container finished" podID="5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" containerID="361154f06b2adce70c209fd6d3ee1a7e31b251c0ff0e58387aeb8390a251434a" exitCode=0 Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.324116 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85f7fb587d-lk8cm" event={"ID":"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d","Type":"ContainerDied","Data":"361154f06b2adce70c209fd6d3ee1a7e31b251c0ff0e58387aeb8390a251434a"} Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.324156 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85f7fb587d-lk8cm" event={"ID":"5b81e360-666a-4498-b4a1-b2e2bb7f9a6d","Type":"ContainerDied","Data":"00347897abfc1251df651a739eeb6d2b11e283731003a9bc54a2174c6b7b8945"} Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.324176 5014 scope.go:117] "RemoveContainer" containerID="0964dccde0b5c76aeb43c1e10947a261ea6a671b47c328ed331ba8d124f92931" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.325949 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-85f7fb587d-lk8cm" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.340057 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf","Type":"ContainerStarted","Data":"8874248b35b3044ef165fdc3593e8e2e389f8fc195b0aace5c1260fbf7aea6e6"} Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.340105 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf","Type":"ContainerStarted","Data":"399eb4cfb0a9dbf7669b0c4a88685369d200d61fd03a67f2e15006ad5b098abb"} Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.346930 5014 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.346980 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.401985 5014 scope.go:117] "RemoveContainer" containerID="361154f06b2adce70c209fd6d3ee1a7e31b251c0ff0e58387aeb8390a251434a" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.425969 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-85f7fb587d-lk8cm"] Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.436830 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-85f7fb587d-lk8cm"] Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.452776 5014 scope.go:117] "RemoveContainer" containerID="0964dccde0b5c76aeb43c1e10947a261ea6a671b47c328ed331ba8d124f92931" Oct 06 21:50:09 crc kubenswrapper[5014]: E1006 21:50:09.453255 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0964dccde0b5c76aeb43c1e10947a261ea6a671b47c328ed331ba8d124f92931\": container with ID starting with 0964dccde0b5c76aeb43c1e10947a261ea6a671b47c328ed331ba8d124f92931 not found: ID does not exist" containerID="0964dccde0b5c76aeb43c1e10947a261ea6a671b47c328ed331ba8d124f92931" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.453295 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0964dccde0b5c76aeb43c1e10947a261ea6a671b47c328ed331ba8d124f92931"} err="failed to get container status \"0964dccde0b5c76aeb43c1e10947a261ea6a671b47c328ed331ba8d124f92931\": rpc error: code = NotFound desc = could not find container \"0964dccde0b5c76aeb43c1e10947a261ea6a671b47c328ed331ba8d124f92931\": container with ID starting with 0964dccde0b5c76aeb43c1e10947a261ea6a671b47c328ed331ba8d124f92931 not found: ID does not exist" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.453326 5014 scope.go:117] "RemoveContainer" containerID="361154f06b2adce70c209fd6d3ee1a7e31b251c0ff0e58387aeb8390a251434a" Oct 06 21:50:09 crc kubenswrapper[5014]: E1006 21:50:09.453823 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"361154f06b2adce70c209fd6d3ee1a7e31b251c0ff0e58387aeb8390a251434a\": container with ID starting with 361154f06b2adce70c209fd6d3ee1a7e31b251c0ff0e58387aeb8390a251434a not found: ID does not exist" 
containerID="361154f06b2adce70c209fd6d3ee1a7e31b251c0ff0e58387aeb8390a251434a" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.453847 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"361154f06b2adce70c209fd6d3ee1a7e31b251c0ff0e58387aeb8390a251434a"} err="failed to get container status \"361154f06b2adce70c209fd6d3ee1a7e31b251c0ff0e58387aeb8390a251434a\": rpc error: code = NotFound desc = could not find container \"361154f06b2adce70c209fd6d3ee1a7e31b251c0ff0e58387aeb8390a251434a\": container with ID starting with 361154f06b2adce70c209fd6d3ee1a7e31b251c0ff0e58387aeb8390a251434a not found: ID does not exist" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.505083 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" path="/var/lib/kubelet/pods/5b81e360-666a-4498-b4a1-b2e2bb7f9a6d/volumes" Oct 06 21:50:09 crc kubenswrapper[5014]: I1006 21:50:09.508568 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a07ac22a-2156-4eb7-8301-fac1dd39bfad" path="/var/lib/kubelet/pods/a07ac22a-2156-4eb7-8301-fac1dd39bfad/volumes" Oct 06 21:50:10 crc kubenswrapper[5014]: I1006 21:50:10.360207 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf","Type":"ContainerStarted","Data":"8e43bbec207ad90407cab0459eeaa37da10b5228534b3bcc64a9d48cab00a4fb"} Oct 06 21:50:10 crc kubenswrapper[5014]: I1006 21:50:10.387358 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.387336893 podStartE2EDuration="3.387336893s" podCreationTimestamp="2025-10-06 21:50:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:50:10.377958235 +0000 UTC m=+1155.670994969" watchObservedRunningTime="2025-10-06 21:50:10.387336893 +0000 UTC m=+1155.680373627" Oct 06 21:50:10 crc kubenswrapper[5014]: I1006 21:50:10.938206 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-796f6ffb8f-4rjtg"] Oct 06 21:50:10 crc kubenswrapper[5014]: E1006 21:50:10.938662 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" containerName="neutron-httpd" Oct 06 21:50:10 crc kubenswrapper[5014]: I1006 21:50:10.938676 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" containerName="neutron-httpd" Oct 06 21:50:10 crc kubenswrapper[5014]: E1006 21:50:10.938708 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" containerName="neutron-api" Oct 06 21:50:10 crc kubenswrapper[5014]: I1006 21:50:10.938714 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" containerName="neutron-api" Oct 06 21:50:10 crc kubenswrapper[5014]: I1006 21:50:10.938887 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" containerName="neutron-api" Oct 06 21:50:10 crc kubenswrapper[5014]: I1006 21:50:10.938918 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b81e360-666a-4498-b4a1-b2e2bb7f9a6d" containerName="neutron-httpd" Oct 06 21:50:10 crc kubenswrapper[5014]: I1006 21:50:10.939894 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:10 crc kubenswrapper[5014]: I1006 21:50:10.941306 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Oct 06 21:50:10 crc kubenswrapper[5014]: I1006 21:50:10.943904 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Oct 06 21:50:10 crc kubenswrapper[5014]: I1006 21:50:10.944657 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Oct 06 21:50:10 crc kubenswrapper[5014]: I1006 21:50:10.953994 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-796f6ffb8f-4rjtg"] Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.102240 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78a24140-d3a5-463a-aaf9-49857f14decc-log-httpd\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.102713 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-config-data\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.102744 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-public-tls-certs\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.102770 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/78a24140-d3a5-463a-aaf9-49857f14decc-etc-swift\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.103061 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-internal-tls-certs\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.103131 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzchk\" (UniqueName: \"kubernetes.io/projected/78a24140-d3a5-463a-aaf9-49857f14decc-kube-api-access-gzchk\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.103208 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-combined-ca-bundle\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " 
pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.103369 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78a24140-d3a5-463a-aaf9-49857f14decc-run-httpd\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.205463 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78a24140-d3a5-463a-aaf9-49857f14decc-run-httpd\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.205655 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78a24140-d3a5-463a-aaf9-49857f14decc-log-httpd\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.205716 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-config-data\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.205758 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-public-tls-certs\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.205805 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/78a24140-d3a5-463a-aaf9-49857f14decc-etc-swift\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.206004 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-internal-tls-certs\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.206041 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzchk\" (UniqueName: \"kubernetes.io/projected/78a24140-d3a5-463a-aaf9-49857f14decc-kube-api-access-gzchk\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.206093 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-combined-ca-bundle\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 
21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.206099 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78a24140-d3a5-463a-aaf9-49857f14decc-run-httpd\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.206763 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78a24140-d3a5-463a-aaf9-49857f14decc-log-httpd\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.211794 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-config-data\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.220877 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/78a24140-d3a5-463a-aaf9-49857f14decc-etc-swift\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.221206 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-internal-tls-certs\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.221545 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-public-tls-certs\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.224365 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-combined-ca-bundle\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.230788 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzchk\" (UniqueName: \"kubernetes.io/projected/78a24140-d3a5-463a-aaf9-49857f14decc-kube-api-access-gzchk\") pod \"swift-proxy-796f6ffb8f-4rjtg\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.259265 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:11 crc kubenswrapper[5014]: I1006 21:50:11.845060 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-796f6ffb8f-4rjtg"] Oct 06 21:50:11 crc kubenswrapper[5014]: W1006 21:50:11.859432 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78a24140_d3a5_463a_aaf9_49857f14decc.slice/crio-be32ce11a90718c0fd2883a20314f687ca16c197ca71b4d220ef215df90df5da WatchSource:0}: Error finding container be32ce11a90718c0fd2883a20314f687ca16c197ca71b4d220ef215df90df5da: Status 404 returned error can't find the container with id be32ce11a90718c0fd2883a20314f687ca16c197ca71b4d220ef215df90df5da Oct 06 21:50:12 crc kubenswrapper[5014]: I1006 21:50:12.388601 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" event={"ID":"78a24140-d3a5-463a-aaf9-49857f14decc","Type":"ContainerStarted","Data":"4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0"} Oct 06 21:50:12 crc kubenswrapper[5014]: I1006 21:50:12.389565 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" event={"ID":"78a24140-d3a5-463a-aaf9-49857f14decc","Type":"ContainerStarted","Data":"599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb"} Oct 06 21:50:12 crc kubenswrapper[5014]: I1006 21:50:12.389650 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" event={"ID":"78a24140-d3a5-463a-aaf9-49857f14decc","Type":"ContainerStarted","Data":"be32ce11a90718c0fd2883a20314f687ca16c197ca71b4d220ef215df90df5da"} Oct 06 21:50:12 crc kubenswrapper[5014]: I1006 21:50:12.389745 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:12 crc kubenswrapper[5014]: I1006 21:50:12.389840 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:50:12 crc kubenswrapper[5014]: I1006 21:50:12.408701 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" podStartSLOduration=2.408683221 podStartE2EDuration="2.408683221s" podCreationTimestamp="2025-10-06 21:50:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:50:12.406837482 +0000 UTC m=+1157.699874236" watchObservedRunningTime="2025-10-06 21:50:12.408683221 +0000 UTC m=+1157.701719945" Oct 06 21:50:12 crc kubenswrapper[5014]: I1006 21:50:12.490569 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 06 21:50:12 crc kubenswrapper[5014]: I1006 21:50:12.491999 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" containerName="glance-log" containerID="cri-o://0430c60c323101c1b753f11e9d36d10e421145df1577e22216b217a8af949311" gracePeriod=30 Oct 06 21:50:12 crc kubenswrapper[5014]: I1006 21:50:12.492231 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" containerName="glance-httpd" containerID="cri-o://e615d6fc74fa40caf082453baa3394e1e1aab609fbfb956be5bef45109329f1f" gracePeriod=30 Oct 06 21:50:12 crc 
kubenswrapper[5014]: I1006 21:50:12.899831 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Oct 06 21:50:12 crc kubenswrapper[5014]: I1006 21:50:12.902046 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:12 crc kubenswrapper[5014]: I1006 21:50:12.902426 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="proxy-httpd" containerID="cri-o://568911e5918392d69ef9b9424f083ece0a988d7f9a8eea2186dfbec890045616" gracePeriod=30 Oct 06 21:50:12 crc kubenswrapper[5014]: I1006 21:50:12.902469 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="ceilometer-notification-agent" containerID="cri-o://5ee57a4b6ef5208e43f440adcdbc9d68a3da1df52462af2d1f59430815cc4a88" gracePeriod=30 Oct 06 21:50:12 crc kubenswrapper[5014]: I1006 21:50:12.902506 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="sg-core" containerID="cri-o://fc8bba0b3b8dfbe3ea4e1140b7efac351af34ed8e1fbf0c92da877bb65ad2df9" gracePeriod=30 Oct 06 21:50:12 crc kubenswrapper[5014]: I1006 21:50:12.903892 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="ceilometer-central-agent" containerID="cri-o://ac96bb1f7ab1dbde49cf028d8c34fc0038e016ea162c03e99aa73384f854cb90" gracePeriod=30 Oct 06 21:50:12 crc kubenswrapper[5014]: I1006 21:50:12.920319 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Oct 06 21:50:13 crc kubenswrapper[5014]: I1006 21:50:13.402551 5014 generic.go:334] "Generic (PLEG): container finished" podID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerID="568911e5918392d69ef9b9424f083ece0a988d7f9a8eea2186dfbec890045616" exitCode=0 Oct 06 21:50:13 crc kubenswrapper[5014]: I1006 21:50:13.402584 5014 generic.go:334] "Generic (PLEG): container finished" podID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerID="fc8bba0b3b8dfbe3ea4e1140b7efac351af34ed8e1fbf0c92da877bb65ad2df9" exitCode=2 Oct 06 21:50:13 crc kubenswrapper[5014]: I1006 21:50:13.402594 5014 generic.go:334] "Generic (PLEG): container finished" podID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerID="ac96bb1f7ab1dbde49cf028d8c34fc0038e016ea162c03e99aa73384f854cb90" exitCode=0 Oct 06 21:50:13 crc kubenswrapper[5014]: I1006 21:50:13.402648 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a75612c-8bae-463f-ae97-d13f279e1a11","Type":"ContainerDied","Data":"568911e5918392d69ef9b9424f083ece0a988d7f9a8eea2186dfbec890045616"} Oct 06 21:50:13 crc kubenswrapper[5014]: I1006 21:50:13.402678 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a75612c-8bae-463f-ae97-d13f279e1a11","Type":"ContainerDied","Data":"fc8bba0b3b8dfbe3ea4e1140b7efac351af34ed8e1fbf0c92da877bb65ad2df9"} Oct 06 21:50:13 crc kubenswrapper[5014]: I1006 21:50:13.402690 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a75612c-8bae-463f-ae97-d13f279e1a11","Type":"ContainerDied","Data":"ac96bb1f7ab1dbde49cf028d8c34fc0038e016ea162c03e99aa73384f854cb90"} Oct 06 21:50:13 crc 
kubenswrapper[5014]: I1006 21:50:13.406217 5014 generic.go:334] "Generic (PLEG): container finished" podID="d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" containerID="0430c60c323101c1b753f11e9d36d10e421145df1577e22216b217a8af949311" exitCode=143 Oct 06 21:50:13 crc kubenswrapper[5014]: I1006 21:50:13.406260 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2","Type":"ContainerDied","Data":"0430c60c323101c1b753f11e9d36d10e421145df1577e22216b217a8af949311"} Oct 06 21:50:13 crc kubenswrapper[5014]: I1006 21:50:13.658278 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 06 21:50:13 crc kubenswrapper[5014]: I1006 21:50:13.658874 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="97665fa2-3321-4c58-b469-d19238d1d8fa" containerName="glance-log" containerID="cri-o://d1df02609b41c237d2564b4493e66e156d3221a07169d32ca23800175d82bca0" gracePeriod=30 Oct 06 21:50:13 crc kubenswrapper[5014]: I1006 21:50:13.659193 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="97665fa2-3321-4c58-b469-d19238d1d8fa" containerName="glance-httpd" containerID="cri-o://ba73765393bceab8b12f6265873f17ba00941d5cc2b2b7ce28e6e96fee51409a" gracePeriod=30 Oct 06 21:50:14 crc kubenswrapper[5014]: I1006 21:50:14.422055 5014 generic.go:334] "Generic (PLEG): container finished" podID="97665fa2-3321-4c58-b469-d19238d1d8fa" containerID="d1df02609b41c237d2564b4493e66e156d3221a07169d32ca23800175d82bca0" exitCode=143 Oct 06 21:50:14 crc kubenswrapper[5014]: I1006 21:50:14.422835 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"97665fa2-3321-4c58-b469-d19238d1d8fa","Type":"ContainerDied","Data":"d1df02609b41c237d2564b4493e66e156d3221a07169d32ca23800175d82bca0"} Oct 06 21:50:16 crc kubenswrapper[5014]: I1006 21:50:16.443177 5014 generic.go:334] "Generic (PLEG): container finished" podID="d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" containerID="e615d6fc74fa40caf082453baa3394e1e1aab609fbfb956be5bef45109329f1f" exitCode=0 Oct 06 21:50:16 crc kubenswrapper[5014]: I1006 21:50:16.443679 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2","Type":"ContainerDied","Data":"e615d6fc74fa40caf082453baa3394e1e1aab609fbfb956be5bef45109329f1f"} Oct 06 21:50:17 crc kubenswrapper[5014]: I1006 21:50:17.478782 5014 generic.go:334] "Generic (PLEG): container finished" podID="97665fa2-3321-4c58-b469-d19238d1d8fa" containerID="ba73765393bceab8b12f6265873f17ba00941d5cc2b2b7ce28e6e96fee51409a" exitCode=0 Oct 06 21:50:17 crc kubenswrapper[5014]: I1006 21:50:17.478828 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"97665fa2-3321-4c58-b469-d19238d1d8fa","Type":"ContainerDied","Data":"ba73765393bceab8b12f6265873f17ba00941d5cc2b2b7ce28e6e96fee51409a"} Oct 06 21:50:18 crc kubenswrapper[5014]: I1006 21:50:18.124835 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Oct 06 21:50:18 crc kubenswrapper[5014]: I1006 21:50:18.493860 5014 generic.go:334] "Generic (PLEG): container finished" podID="9a75612c-8bae-463f-ae97-d13f279e1a11" 
containerID="5ee57a4b6ef5208e43f440adcdbc9d68a3da1df52462af2d1f59430815cc4a88" exitCode=0 Oct 06 21:50:18 crc kubenswrapper[5014]: I1006 21:50:18.493899 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a75612c-8bae-463f-ae97-d13f279e1a11","Type":"ContainerDied","Data":"5ee57a4b6ef5208e43f440adcdbc9d68a3da1df52462af2d1f59430815cc4a88"} Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.313387 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.357135 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.394418 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-scripts\") pod \"9a75612c-8bae-463f-ae97-d13f279e1a11\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.394540 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a75612c-8bae-463f-ae97-d13f279e1a11-run-httpd\") pod \"9a75612c-8bae-463f-ae97-d13f279e1a11\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.394582 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-sg-core-conf-yaml\") pod \"9a75612c-8bae-463f-ae97-d13f279e1a11\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.394636 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-config-data\") pod \"9a75612c-8bae-463f-ae97-d13f279e1a11\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.394656 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-combined-ca-bundle\") pod \"9a75612c-8bae-463f-ae97-d13f279e1a11\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.394681 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a75612c-8bae-463f-ae97-d13f279e1a11-log-httpd\") pod \"9a75612c-8bae-463f-ae97-d13f279e1a11\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.394749 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cd2bt\" (UniqueName: \"kubernetes.io/projected/9a75612c-8bae-463f-ae97-d13f279e1a11-kube-api-access-cd2bt\") pod \"9a75612c-8bae-463f-ae97-d13f279e1a11\" (UID: \"9a75612c-8bae-463f-ae97-d13f279e1a11\") " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.399330 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a75612c-8bae-463f-ae97-d13f279e1a11-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9a75612c-8bae-463f-ae97-d13f279e1a11" (UID: "9a75612c-8bae-463f-ae97-d13f279e1a11"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.399349 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a75612c-8bae-463f-ae97-d13f279e1a11-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9a75612c-8bae-463f-ae97-d13f279e1a11" (UID: "9a75612c-8bae-463f-ae97-d13f279e1a11"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.419837 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-scripts" (OuterVolumeSpecName: "scripts") pod "9a75612c-8bae-463f-ae97-d13f279e1a11" (UID: "9a75612c-8bae-463f-ae97-d13f279e1a11"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.419909 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a75612c-8bae-463f-ae97-d13f279e1a11-kube-api-access-cd2bt" (OuterVolumeSpecName: "kube-api-access-cd2bt") pod "9a75612c-8bae-463f-ae97-d13f279e1a11" (UID: "9a75612c-8bae-463f-ae97-d13f279e1a11"). InnerVolumeSpecName "kube-api-access-cd2bt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.427381 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9a75612c-8bae-463f-ae97-d13f279e1a11" (UID: "9a75612c-8bae-463f-ae97-d13f279e1a11"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.496660 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-httpd-run\") pod \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.496723 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-combined-ca-bundle\") pod \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.496779 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-logs\") pod \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.496838 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-public-tls-certs\") pod \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.496883 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhwg5\" (UniqueName: \"kubernetes.io/projected/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-kube-api-access-nhwg5\") pod \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.496951 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-config-data\") pod \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.496987 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.497021 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-scripts\") pod \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\" (UID: \"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2\") " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.497417 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.497435 5014 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a75612c-8bae-463f-ae97-d13f279e1a11-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.497451 5014 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.497460 5014 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9a75612c-8bae-463f-ae97-d13f279e1a11-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.497471 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cd2bt\" (UniqueName: \"kubernetes.io/projected/9a75612c-8bae-463f-ae97-d13f279e1a11-kube-api-access-cd2bt\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.498569 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9a75612c-8bae-463f-ae97-d13f279e1a11" (UID: "9a75612c-8bae-463f-ae97-d13f279e1a11"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.498577 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" (UID: "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.498863 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-logs" (OuterVolumeSpecName: "logs") pod "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" (UID: "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.503746 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" (UID: "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.505732 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-scripts" (OuterVolumeSpecName: "scripts") pod "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" (UID: "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.507793 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-kube-api-access-nhwg5" (OuterVolumeSpecName: "kube-api-access-nhwg5") pod "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" (UID: "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2"). InnerVolumeSpecName "kube-api-access-nhwg5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.526063 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" (UID: "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.532004 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d5c167e1-eaa5-4ed5-aeaf-fba7170daec2","Type":"ContainerDied","Data":"05301631f8a9771d5598613b5e3187b428ab613a73326585258ad97fa8ae7bd0"} Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.532055 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.532448 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-config-data" (OuterVolumeSpecName: "config-data") pod "9a75612c-8bae-463f-ae97-d13f279e1a11" (UID: "9a75612c-8bae-463f-ae97-d13f279e1a11"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.532061 5014 scope.go:117] "RemoveContainer" containerID="e615d6fc74fa40caf082453baa3394e1e1aab609fbfb956be5bef45109329f1f" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.537880 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9a75612c-8bae-463f-ae97-d13f279e1a11","Type":"ContainerDied","Data":"fff04932d5c45d5838c2e414dbc982e9868aefa0a2b95126b20d2dfb51a2b802"} Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.537908 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.539431 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9","Type":"ContainerStarted","Data":"085db7e17be44d9a79cea2381416abb7231f57828c4dbcfdbdfc446dac2fb6f0"} Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.559372 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-config-data" (OuterVolumeSpecName: "config-data") pod "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" (UID: "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.566586 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" (UID: "d5c167e1-eaa5-4ed5-aeaf-fba7170daec2"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.572823 5014 scope.go:117] "RemoveContainer" containerID="0430c60c323101c1b753f11e9d36d10e421145df1577e22216b217a8af949311" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.586221 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.046555261 podStartE2EDuration="14.586202244s" podCreationTimestamp="2025-10-06 21:50:06 +0000 UTC" firstStartedPulling="2025-10-06 21:50:07.464776874 +0000 UTC m=+1152.757813608" lastFinishedPulling="2025-10-06 21:50:20.004423857 +0000 UTC m=+1165.297460591" observedRunningTime="2025-10-06 21:50:20.57094705 +0000 UTC m=+1165.863983784" watchObservedRunningTime="2025-10-06 21:50:20.586202244 +0000 UTC m=+1165.879238978" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.598581 5014 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.598865 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.598876 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.598885 5014 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.598893 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhwg5\" (UniqueName: \"kubernetes.io/projected/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-kube-api-access-nhwg5\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.598902 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.598921 5014 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.598929 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.598939 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.598948 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a75612c-8bae-463f-ae97-d13f279e1a11-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.599673 5014 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.601858 5014 scope.go:117] "RemoveContainer" containerID="568911e5918392d69ef9b9424f083ece0a988d7f9a8eea2186dfbec890045616" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.617840 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.628596 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:20 crc kubenswrapper[5014]: E1006 21:50:20.629011 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" containerName="glance-httpd" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.629028 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" containerName="glance-httpd" Oct 06 21:50:20 crc kubenswrapper[5014]: E1006 21:50:20.629052 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="ceilometer-central-agent" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.629060 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="ceilometer-central-agent" Oct 06 21:50:20 crc kubenswrapper[5014]: E1006 21:50:20.629073 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="proxy-httpd" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.629079 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="proxy-httpd" Oct 06 21:50:20 crc kubenswrapper[5014]: E1006 21:50:20.629092 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" containerName="glance-log" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.629097 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" containerName="glance-log" Oct 06 21:50:20 crc kubenswrapper[5014]: E1006 21:50:20.629109 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="sg-core" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.629116 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="sg-core" Oct 06 21:50:20 crc kubenswrapper[5014]: E1006 21:50:20.629133 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="ceilometer-notification-agent" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.629139 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="ceilometer-notification-agent" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.629289 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="sg-core" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.629300 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="ceilometer-central-agent" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.629312 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="proxy-httpd" Oct 06 21:50:20 crc 
kubenswrapper[5014]: I1006 21:50:20.629324 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" containerName="glance-log" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.629341 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" containerName="glance-httpd" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.629353 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" containerName="ceilometer-notification-agent" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.630529 5014 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.630917 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.635792 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.636087 5014 scope.go:117] "RemoveContainer" containerID="fc8bba0b3b8dfbe3ea4e1140b7efac351af34ed8e1fbf0c92da877bb65ad2df9" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.636155 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.638491 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.683774 5014 scope.go:117] "RemoveContainer" containerID="5ee57a4b6ef5208e43f440adcdbc9d68a3da1df52462af2d1f59430815cc4a88" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.700769 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.700872 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbh6x\" (UniqueName: \"kubernetes.io/projected/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-kube-api-access-vbh6x\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.700913 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-config-data\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.700988 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-log-httpd\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.701030 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-scripts\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.701059 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-run-httpd\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.701103 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.701179 5014 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.716298 5014 scope.go:117] "RemoveContainer" containerID="ac96bb1f7ab1dbde49cf028d8c34fc0038e016ea162c03e99aa73384f854cb90" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.802810 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-log-httpd\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.802889 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-scripts\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.802923 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-run-httpd\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.802961 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.803039 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.803102 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbh6x\" (UniqueName: \"kubernetes.io/projected/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-kube-api-access-vbh6x\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0" Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.803141 5014 
Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.803203 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-log-httpd\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0"
Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.807250 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0"
Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.808202 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-config-data\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0"
Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.808365 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0"
Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.808641 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-run-httpd\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0"
Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.817242 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-scripts\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0"
Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.827301 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbh6x\" (UniqueName: \"kubernetes.io/projected/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-kube-api-access-vbh6x\") pod \"ceilometer-0\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " pod="openstack/ceilometer-0"
Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.889358 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.896282 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.925455 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.926954 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.928973 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.929241 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.958266 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 06 21:50:20 crc kubenswrapper[5014]: I1006 21:50:20.968002 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.008074 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-config-data\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.008145 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.008181 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a3bf50b-cb91-4201-affd-0c42d3585df2-logs\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.008206 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-scripts\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.008269 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkdlm\" (UniqueName: \"kubernetes.io/projected/4a3bf50b-cb91-4201-affd-0c42d3585df2-kube-api-access-gkdlm\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.008311 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a3bf50b-cb91-4201-affd-0c42d3585df2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.008364 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.008397 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.109873 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.110124 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.110166 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-config-data\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.110194 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.110216 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a3bf50b-cb91-4201-affd-0c42d3585df2-logs\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.110234 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-scripts\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.110280 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkdlm\" (UniqueName: \"kubernetes.io/projected/4a3bf50b-cb91-4201-affd-0c42d3585df2-kube-api-access-gkdlm\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.110705 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a3bf50b-cb91-4201-affd-0c42d3585df2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.111103 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a3bf50b-cb91-4201-affd-0c42d3585df2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.111341 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.111406 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a3bf50b-cb91-4201-affd-0c42d3585df2-logs\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.115548 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-scripts\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.126713 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.131557 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.134670 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkdlm\" (UniqueName: \"kubernetes.io/projected/4a3bf50b-cb91-4201-affd-0c42d3585df2-kube-api-access-gkdlm\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.134860 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-config-data\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.211644 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.267379 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-796f6ffb8f-4rjtg"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.272401 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-796f6ffb8f-4rjtg"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.273069 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.418693 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.500607 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a75612c-8bae-463f-ae97-d13f279e1a11" path="/var/lib/kubelet/pods/9a75612c-8bae-463f-ae97-d13f279e1a11/volumes"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.501661 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5c167e1-eaa5-4ed5-aeaf-fba7170daec2" path="/var/lib/kubelet/pods/d5c167e1-eaa5-4ed5-aeaf-fba7170daec2/volumes"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.524251 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-scripts\") pod \"97665fa2-3321-4c58-b469-d19238d1d8fa\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") "
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.524356 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"97665fa2-3321-4c58-b469-d19238d1d8fa\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") "
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.524431 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97665fa2-3321-4c58-b469-d19238d1d8fa-logs\") pod \"97665fa2-3321-4c58-b469-d19238d1d8fa\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") "
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.524459 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h68kf\" (UniqueName: \"kubernetes.io/projected/97665fa2-3321-4c58-b469-d19238d1d8fa-kube-api-access-h68kf\") pod \"97665fa2-3321-4c58-b469-d19238d1d8fa\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") "
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.524512 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-config-data\") pod \"97665fa2-3321-4c58-b469-d19238d1d8fa\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") "
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.524575 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-internal-tls-certs\") pod \"97665fa2-3321-4c58-b469-d19238d1d8fa\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") "
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.524656 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-combined-ca-bundle\") pod \"97665fa2-3321-4c58-b469-d19238d1d8fa\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") "
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.524707 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/97665fa2-3321-4c58-b469-d19238d1d8fa-httpd-run\") pod \"97665fa2-3321-4c58-b469-d19238d1d8fa\" (UID: \"97665fa2-3321-4c58-b469-d19238d1d8fa\") "
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.525447 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97665fa2-3321-4c58-b469-d19238d1d8fa-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "97665fa2-3321-4c58-b469-d19238d1d8fa" (UID: "97665fa2-3321-4c58-b469-d19238d1d8fa"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.527944 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97665fa2-3321-4c58-b469-d19238d1d8fa-logs" (OuterVolumeSpecName: "logs") pod "97665fa2-3321-4c58-b469-d19238d1d8fa" (UID: "97665fa2-3321-4c58-b469-d19238d1d8fa"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.540840 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "97665fa2-3321-4c58-b469-d19238d1d8fa" (UID: "97665fa2-3321-4c58-b469-d19238d1d8fa"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.549879 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-scripts" (OuterVolumeSpecName: "scripts") pod "97665fa2-3321-4c58-b469-d19238d1d8fa" (UID: "97665fa2-3321-4c58-b469-d19238d1d8fa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.587595 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"97665fa2-3321-4c58-b469-d19238d1d8fa","Type":"ContainerDied","Data":"1bf245465c6b7e23fee3020e49c989301c2154a31a605e19250cd11487d99264"}
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.587671 5014 scope.go:117] "RemoveContainer" containerID="ba73765393bceab8b12f6265873f17ba00941d5cc2b2b7ce28e6e96fee51409a"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.587830 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.589108 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97665fa2-3321-4c58-b469-d19238d1d8fa-kube-api-access-h68kf" (OuterVolumeSpecName: "kube-api-access-h68kf") pod "97665fa2-3321-4c58-b469-d19238d1d8fa" (UID: "97665fa2-3321-4c58-b469-d19238d1d8fa"). InnerVolumeSpecName "kube-api-access-h68kf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.599748 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.609664 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97665fa2-3321-4c58-b469-d19238d1d8fa" (UID: "97665fa2-3321-4c58-b469-d19238d1d8fa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.627224 5014 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/97665fa2-3321-4c58-b469-d19238d1d8fa-httpd-run\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.627262 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-scripts\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.627287 5014 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" "
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.627298 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97665fa2-3321-4c58-b469-d19238d1d8fa-logs\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.627309 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h68kf\" (UniqueName: \"kubernetes.io/projected/97665fa2-3321-4c58-b469-d19238d1d8fa-kube-api-access-h68kf\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.627321 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.651651 5014 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.657519 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-config-data" (OuterVolumeSpecName: "config-data") pod "97665fa2-3321-4c58-b469-d19238d1d8fa" (UID: "97665fa2-3321-4c58-b469-d19238d1d8fa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.657659 5014 scope.go:117] "RemoveContainer" containerID="d1df02609b41c237d2564b4493e66e156d3221a07169d32ca23800175d82bca0"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.687757 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "97665fa2-3321-4c58-b469-d19238d1d8fa" (UID: "97665fa2-3321-4c58-b469-d19238d1d8fa"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.729706 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-config-data\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.729740 5014 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97665fa2-3321-4c58-b469-d19238d1d8fa-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.729869 5014 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.735088 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.735484 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.971027 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.983772 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.994665 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 06 21:50:21 crc kubenswrapper[5014]: E1006 21:50:21.995169 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97665fa2-3321-4c58-b469-d19238d1d8fa" containerName="glance-httpd"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.995209 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="97665fa2-3321-4c58-b469-d19238d1d8fa" containerName="glance-httpd"
Oct 06 21:50:21 crc kubenswrapper[5014]: E1006 21:50:21.995274 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97665fa2-3321-4c58-b469-d19238d1d8fa" containerName="glance-log"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.995282 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="97665fa2-3321-4c58-b469-d19238d1d8fa" containerName="glance-log"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.995512 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="97665fa2-3321-4c58-b469-d19238d1d8fa" containerName="glance-log"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.995531 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="97665fa2-3321-4c58-b469-d19238d1d8fa" containerName="glance-httpd"
Oct 06 21:50:21 crc kubenswrapper[5014]: I1006 21:50:21.997124 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.003300 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.003328 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.037963 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.069031 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.136341 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/daa5c33b-d941-4030-bf9f-cd6ed831986e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.136409 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6w6t\" (UniqueName: \"kubernetes.io/projected/daa5c33b-d941-4030-bf9f-cd6ed831986e-kube-api-access-v6w6t\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.136534 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.136573 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.136608 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/daa5c33b-d941-4030-bf9f-cd6ed831986e-logs\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.136649 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.136786 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.136846 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.238870 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/daa5c33b-d941-4030-bf9f-cd6ed831986e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.239688 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6w6t\" (UniqueName: \"kubernetes.io/projected/daa5c33b-d941-4030-bf9f-cd6ed831986e-kube-api-access-v6w6t\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.239759 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.239801 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.239640 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/daa5c33b-d941-4030-bf9f-cd6ed831986e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.240174 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/daa5c33b-d941-4030-bf9f-cd6ed831986e-logs\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.240208 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.240274 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.240293 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.241271 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/daa5c33b-d941-4030-bf9f-cd6ed831986e-logs\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.241809 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.249547 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.253395 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.253921 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.256561 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6w6t\" (UniqueName: \"kubernetes.io/projected/daa5c33b-d941-4030-bf9f-cd6ed831986e-kube-api-access-v6w6t\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.259242 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.280359 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.320968 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.678101 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4a3bf50b-cb91-4201-affd-0c42d3585df2","Type":"ContainerStarted","Data":"aeb166394dfde50d2fd4ec25716d20aa19a01c64160be32d3a395f08beb79724"}
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.691284 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43","Type":"ContainerStarted","Data":"e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b"}
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.691330 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43","Type":"ContainerStarted","Data":"f12493ce1c58bf9504b33fea1cb3f70d2cfc5cc2faa29c0dff54c5137344bae3"}
Oct 06 21:50:22 crc kubenswrapper[5014]: I1006 21:50:22.963610 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 06 21:50:23 crc kubenswrapper[5014]: I1006 21:50:23.498277 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97665fa2-3321-4c58-b469-d19238d1d8fa" path="/var/lib/kubelet/pods/97665fa2-3321-4c58-b469-d19238d1d8fa/volumes"
Oct 06 21:50:23 crc kubenswrapper[5014]: I1006 21:50:23.704372 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4a3bf50b-cb91-4201-affd-0c42d3585df2","Type":"ContainerStarted","Data":"f799cb043ff7d806ae4f877f34b2084e03598231d0c40a3275884b945a348c9e"}
Oct 06 21:50:23 crc kubenswrapper[5014]: I1006 21:50:23.704421 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4a3bf50b-cb91-4201-affd-0c42d3585df2","Type":"ContainerStarted","Data":"f69566e2159d9025363346b43e15495ad0dab925585a51aae0e3e6b46ac486f8"}
Oct 06 21:50:23 crc kubenswrapper[5014]: I1006 21:50:23.709779 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43","Type":"ContainerStarted","Data":"28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a"}
Oct 06 21:50:23 crc kubenswrapper[5014]: I1006 21:50:23.711351 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"daa5c33b-d941-4030-bf9f-cd6ed831986e","Type":"ContainerStarted","Data":"4563049f441233d9899b9510c09134c6b97cbbfee44d45e1869f367480e6495a"}
Oct 06 21:50:23 crc kubenswrapper[5014]: I1006 21:50:23.711382 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"daa5c33b-d941-4030-bf9f-cd6ed831986e","Type":"ContainerStarted","Data":"7590737cacad4c6041ad9e0a8ef5ddc26a5bc9cf67233c4f1aba46d2b82548f6"}
Oct 06 21:50:24 crc kubenswrapper[5014]: I1006 21:50:24.722791 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"daa5c33b-d941-4030-bf9f-cd6ed831986e","Type":"ContainerStarted","Data":"1b5ca8147b22af5bd68e4073c702853e918a3da61cc14255747ea6cd2310a427"}
Oct 06 21:50:24 crc kubenswrapper[5014]: I1006 21:50:24.727804 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43","Type":"ContainerStarted","Data":"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f"}
event={"ID":"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43","Type":"ContainerStarted","Data":"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f"} Oct 06 21:50:24 crc kubenswrapper[5014]: I1006 21:50:24.749729 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.749709951 podStartE2EDuration="4.749709951s" podCreationTimestamp="2025-10-06 21:50:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:50:23.728942998 +0000 UTC m=+1169.021979732" watchObservedRunningTime="2025-10-06 21:50:24.749709951 +0000 UTC m=+1170.042746685" Oct 06 21:50:24 crc kubenswrapper[5014]: I1006 21:50:24.751742 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.751731866 podStartE2EDuration="3.751731866s" podCreationTimestamp="2025-10-06 21:50:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:50:24.746220811 +0000 UTC m=+1170.039257545" watchObservedRunningTime="2025-10-06 21:50:24.751731866 +0000 UTC m=+1170.044768600" Oct 06 21:50:25 crc kubenswrapper[5014]: I1006 21:50:25.003594 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:25 crc kubenswrapper[5014]: I1006 21:50:25.744797 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43","Type":"ContainerStarted","Data":"ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd"} Oct 06 21:50:25 crc kubenswrapper[5014]: I1006 21:50:25.745416 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="proxy-httpd" containerID="cri-o://ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd" gracePeriod=30 Oct 06 21:50:25 crc kubenswrapper[5014]: I1006 21:50:25.744943 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="ceilometer-central-agent" containerID="cri-o://e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b" gracePeriod=30 Oct 06 21:50:25 crc kubenswrapper[5014]: I1006 21:50:25.745541 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="sg-core" containerID="cri-o://aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f" gracePeriod=30 Oct 06 21:50:25 crc kubenswrapper[5014]: I1006 21:50:25.745661 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="ceilometer-notification-agent" containerID="cri-o://28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a" gracePeriod=30 Oct 06 21:50:25 crc kubenswrapper[5014]: I1006 21:50:25.745696 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 06 21:50:25 crc kubenswrapper[5014]: I1006 21:50:25.781452 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.145493812 podStartE2EDuration="5.781425333s" 
podCreationTimestamp="2025-10-06 21:50:20 +0000 UTC" firstStartedPulling="2025-10-06 21:50:21.60389309 +0000 UTC m=+1166.896929824" lastFinishedPulling="2025-10-06 21:50:25.239824611 +0000 UTC m=+1170.532861345" observedRunningTime="2025-10-06 21:50:25.774427351 +0000 UTC m=+1171.067464095" watchObservedRunningTime="2025-10-06 21:50:25.781425333 +0000 UTC m=+1171.074462067" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.703694 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.754346 5014 generic.go:334] "Generic (PLEG): container finished" podID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerID="ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd" exitCode=0 Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.754392 5014 generic.go:334] "Generic (PLEG): container finished" podID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerID="aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f" exitCode=2 Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.754402 5014 generic.go:334] "Generic (PLEG): container finished" podID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerID="28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a" exitCode=0 Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.754411 5014 generic.go:334] "Generic (PLEG): container finished" podID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerID="e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b" exitCode=0 Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.754432 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43","Type":"ContainerDied","Data":"ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd"} Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.754462 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43","Type":"ContainerDied","Data":"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f"} Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.754477 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43","Type":"ContainerDied","Data":"28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a"} Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.754487 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43","Type":"ContainerDied","Data":"e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b"} Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.754496 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43","Type":"ContainerDied","Data":"f12493ce1c58bf9504b33fea1cb3f70d2cfc5cc2faa29c0dff54c5137344bae3"} Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.754512 5014 scope.go:117] "RemoveContainer" containerID="ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.754705 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.784456 5014 scope.go:117] "RemoveContainer" containerID="aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.788455 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vbh6x\" (UniqueName: \"kubernetes.io/projected/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-kube-api-access-vbh6x\") pod \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.788500 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-scripts\") pod \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.788567 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-combined-ca-bundle\") pod \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.788607 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-config-data\") pod \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.788699 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-sg-core-conf-yaml\") pod \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.788793 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-run-httpd\") pod \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.788863 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-log-httpd\") pod \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\" (UID: \"5aa05f75-6a28-4848-a2ed-e4fefa1b5a43\") " Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.790218 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" (UID: "5aa05f75-6a28-4848-a2ed-e4fefa1b5a43"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.790473 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" (UID: "5aa05f75-6a28-4848-a2ed-e4fefa1b5a43"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.812545 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-kube-api-access-vbh6x" (OuterVolumeSpecName: "kube-api-access-vbh6x") pod "5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" (UID: "5aa05f75-6a28-4848-a2ed-e4fefa1b5a43"). InnerVolumeSpecName "kube-api-access-vbh6x". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.815764 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-scripts" (OuterVolumeSpecName: "scripts") pod "5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" (UID: "5aa05f75-6a28-4848-a2ed-e4fefa1b5a43"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.818832 5014 scope.go:117] "RemoveContainer" containerID="28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.855877 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" (UID: "5aa05f75-6a28-4848-a2ed-e4fefa1b5a43"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.890711 5014 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.890745 5014 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.890757 5014 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.890769 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vbh6x\" (UniqueName: \"kubernetes.io/projected/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-kube-api-access-vbh6x\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.890781 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.899669 5014 scope.go:117] "RemoveContainer" containerID="e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.930269 5014 scope.go:117] "RemoveContainer" containerID="ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd" Oct 06 21:50:26 crc kubenswrapper[5014]: E1006 21:50:26.930974 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd\": container with ID starting with 
ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd not found: ID does not exist" containerID="ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.931029 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd"} err="failed to get container status \"ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd\": rpc error: code = NotFound desc = could not find container \"ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd\": container with ID starting with ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.931066 5014 scope.go:117] "RemoveContainer" containerID="aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f" Oct 06 21:50:26 crc kubenswrapper[5014]: E1006 21:50:26.931344 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f\": container with ID starting with aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f not found: ID does not exist" containerID="aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.931408 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f"} err="failed to get container status \"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f\": rpc error: code = NotFound desc = could not find container \"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f\": container with ID starting with aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.931428 5014 scope.go:117] "RemoveContainer" containerID="28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a" Oct 06 21:50:26 crc kubenswrapper[5014]: E1006 21:50:26.931875 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a\": container with ID starting with 28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a not found: ID does not exist" containerID="28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.931911 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a"} err="failed to get container status \"28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a\": rpc error: code = NotFound desc = could not find container \"28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a\": container with ID starting with 28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.931938 5014 scope.go:117] "RemoveContainer" containerID="e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b" Oct 06 21:50:26 crc kubenswrapper[5014]: E1006 21:50:26.935358 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc 
error: code = NotFound desc = could not find container \"e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b\": container with ID starting with e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b not found: ID does not exist" containerID="e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.935420 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b"} err="failed to get container status \"e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b\": rpc error: code = NotFound desc = could not find container \"e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b\": container with ID starting with e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.935441 5014 scope.go:117] "RemoveContainer" containerID="ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.936069 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd"} err="failed to get container status \"ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd\": rpc error: code = NotFound desc = could not find container \"ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd\": container with ID starting with ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.936094 5014 scope.go:117] "RemoveContainer" containerID="aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.936473 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f"} err="failed to get container status \"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f\": rpc error: code = NotFound desc = could not find container \"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f\": container with ID starting with aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.936493 5014 scope.go:117] "RemoveContainer" containerID="28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.941268 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a"} err="failed to get container status \"28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a\": rpc error: code = NotFound desc = could not find container \"28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a\": container with ID starting with 28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.941321 5014 scope.go:117] "RemoveContainer" containerID="e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.941761 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" (UID: "5aa05f75-6a28-4848-a2ed-e4fefa1b5a43"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.944326 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-config-data" (OuterVolumeSpecName: "config-data") pod "5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" (UID: "5aa05f75-6a28-4848-a2ed-e4fefa1b5a43"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.948138 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b"} err="failed to get container status \"e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b\": rpc error: code = NotFound desc = could not find container \"e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b\": container with ID starting with e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.948191 5014 scope.go:117] "RemoveContainer" containerID="ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.951935 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd"} err="failed to get container status \"ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd\": rpc error: code = NotFound desc = could not find container \"ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd\": container with ID starting with ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.951994 5014 scope.go:117] "RemoveContainer" containerID="aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.952270 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f"} err="failed to get container status \"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f\": rpc error: code = NotFound desc = could not find container \"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f\": container with ID starting with aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.952298 5014 scope.go:117] "RemoveContainer" containerID="28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.952560 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a"} err="failed to get container status \"28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a\": rpc error: code = NotFound desc = could not find container \"28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a\": container with ID starting 
with 28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.952581 5014 scope.go:117] "RemoveContainer" containerID="e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.966813 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b"} err="failed to get container status \"e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b\": rpc error: code = NotFound desc = could not find container \"e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b\": container with ID starting with e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.966869 5014 scope.go:117] "RemoveContainer" containerID="ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.972790 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd"} err="failed to get container status \"ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd\": rpc error: code = NotFound desc = could not find container \"ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd\": container with ID starting with ed61c6a433d2637f6fa607f97334fc9c354ccd8b58960eacfb7f2518d387fefd not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.972848 5014 scope.go:117] "RemoveContainer" containerID="aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.977776 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f"} err="failed to get container status \"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f\": rpc error: code = NotFound desc = could not find container \"aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f\": container with ID starting with aef5b2c6acf7535ac87ee56f3aaf0fdad4852df612695253e097f6e0a568c32f not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.977821 5014 scope.go:117] "RemoveContainer" containerID="28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.981753 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a"} err="failed to get container status \"28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a\": rpc error: code = NotFound desc = could not find container \"28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a\": container with ID starting with 28258eb5f7e8bfa338a19d616ea7142552f030dd915baca52d7ac6a508f2c48a not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.981792 5014 scope.go:117] "RemoveContainer" containerID="e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.987813 5014 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b"} err="failed to get container status \"e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b\": rpc error: code = NotFound desc = could not find container \"e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b\": container with ID starting with e8533532e446bef032bf18b3d9623d241b7b61fbc903ab8075069ab12f76f58b not found: ID does not exist" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.993487 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:26 crc kubenswrapper[5014]: I1006 21:50:26.993512 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.092836 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.104552 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.126542 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:27 crc kubenswrapper[5014]: E1006 21:50:27.130272 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="sg-core" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.130322 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="sg-core" Oct 06 21:50:27 crc kubenswrapper[5014]: E1006 21:50:27.130351 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="proxy-httpd" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.130361 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="proxy-httpd" Oct 06 21:50:27 crc kubenswrapper[5014]: E1006 21:50:27.130377 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="ceilometer-notification-agent" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.130384 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="ceilometer-notification-agent" Oct 06 21:50:27 crc kubenswrapper[5014]: E1006 21:50:27.130397 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="ceilometer-central-agent" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.130405 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="ceilometer-central-agent" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.130654 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="ceilometer-notification-agent" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.130677 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="proxy-httpd" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.130687 5014 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="sg-core" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.130717 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" containerName="ceilometer-central-agent" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.132603 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.135445 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.136734 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.145797 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.199632 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-run-httpd\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.199722 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.199780 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.199807 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-config-data\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.199843 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-log-httpd\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.199866 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-scripts\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.199900 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lx62s\" (UniqueName: \"kubernetes.io/projected/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-kube-api-access-lx62s\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " 
pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.302108 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-run-httpd\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.302203 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.302258 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.302284 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-config-data\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.302325 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-log-httpd\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.302349 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-scripts\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.302382 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lx62s\" (UniqueName: \"kubernetes.io/projected/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-kube-api-access-lx62s\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.302714 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-run-httpd\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.303010 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-log-httpd\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.307543 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 
crc kubenswrapper[5014]: I1006 21:50:27.308127 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-scripts\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.309313 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.310130 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-config-data\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.324504 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lx62s\" (UniqueName: \"kubernetes.io/projected/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-kube-api-access-lx62s\") pod \"ceilometer-0\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") " pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.453880 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.498535 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5aa05f75-6a28-4848-a2ed-e4fefa1b5a43" path="/var/lib/kubelet/pods/5aa05f75-6a28-4848-a2ed-e4fefa1b5a43/volumes" Oct 06 21:50:27 crc kubenswrapper[5014]: I1006 21:50:27.935448 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.501263 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-55rwp"] Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.503931 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-55rwp" Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.511792 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-55rwp"] Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.541233 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqw2c\" (UniqueName: \"kubernetes.io/projected/caa95dd3-2ffa-4b18-b7e9-ad4075b25304-kube-api-access-pqw2c\") pod \"nova-api-db-create-55rwp\" (UID: \"caa95dd3-2ffa-4b18-b7e9-ad4075b25304\") " pod="openstack/nova-api-db-create-55rwp" Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.589441 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-ptmjb"] Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.590629 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-ptmjb" Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.603038 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-ptmjb"] Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.649148 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rv8m2\" (UniqueName: \"kubernetes.io/projected/17a9c7b6-c877-4057-86ee-13d1ee4f9515-kube-api-access-rv8m2\") pod \"nova-cell0-db-create-ptmjb\" (UID: \"17a9c7b6-c877-4057-86ee-13d1ee4f9515\") " pod="openstack/nova-cell0-db-create-ptmjb" Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.649344 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqw2c\" (UniqueName: \"kubernetes.io/projected/caa95dd3-2ffa-4b18-b7e9-ad4075b25304-kube-api-access-pqw2c\") pod \"nova-api-db-create-55rwp\" (UID: \"caa95dd3-2ffa-4b18-b7e9-ad4075b25304\") " pod="openstack/nova-api-db-create-55rwp" Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.672720 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqw2c\" (UniqueName: \"kubernetes.io/projected/caa95dd3-2ffa-4b18-b7e9-ad4075b25304-kube-api-access-pqw2c\") pod \"nova-api-db-create-55rwp\" (UID: \"caa95dd3-2ffa-4b18-b7e9-ad4075b25304\") " pod="openstack/nova-api-db-create-55rwp" Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.702188 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-wmjhx"] Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.703396 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-wmjhx" Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.720167 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-wmjhx"] Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.750650 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znxxg\" (UniqueName: \"kubernetes.io/projected/347f9b08-ba1b-4065-be89-4a13a0f36b23-kube-api-access-znxxg\") pod \"nova-cell1-db-create-wmjhx\" (UID: \"347f9b08-ba1b-4065-be89-4a13a0f36b23\") " pod="openstack/nova-cell1-db-create-wmjhx" Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.750723 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rv8m2\" (UniqueName: \"kubernetes.io/projected/17a9c7b6-c877-4057-86ee-13d1ee4f9515-kube-api-access-rv8m2\") pod \"nova-cell0-db-create-ptmjb\" (UID: \"17a9c7b6-c877-4057-86ee-13d1ee4f9515\") " pod="openstack/nova-cell0-db-create-ptmjb" Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.787307 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rv8m2\" (UniqueName: \"kubernetes.io/projected/17a9c7b6-c877-4057-86ee-13d1ee4f9515-kube-api-access-rv8m2\") pod \"nova-cell0-db-create-ptmjb\" (UID: \"17a9c7b6-c877-4057-86ee-13d1ee4f9515\") " pod="openstack/nova-cell0-db-create-ptmjb" Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.794029 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6387bac-c8e9-49cb-9d8a-63f0e96dd160","Type":"ContainerStarted","Data":"49b1d0cd5b177d27281bab37406f44b4ae853851e2eabfc355491c71f0d0559c"} Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.794078 5014 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/ceilometer-0" event={"ID":"c6387bac-c8e9-49cb-9d8a-63f0e96dd160","Type":"ContainerStarted","Data":"2fe9dc971ec389052b0c0f0cdd07dab733e40f521e0a2e10c5efcde2ae644349"} Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.847322 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-55rwp" Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.852908 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znxxg\" (UniqueName: \"kubernetes.io/projected/347f9b08-ba1b-4065-be89-4a13a0f36b23-kube-api-access-znxxg\") pod \"nova-cell1-db-create-wmjhx\" (UID: \"347f9b08-ba1b-4065-be89-4a13a0f36b23\") " pod="openstack/nova-cell1-db-create-wmjhx" Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.874015 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znxxg\" (UniqueName: \"kubernetes.io/projected/347f9b08-ba1b-4065-be89-4a13a0f36b23-kube-api-access-znxxg\") pod \"nova-cell1-db-create-wmjhx\" (UID: \"347f9b08-ba1b-4065-be89-4a13a0f36b23\") " pod="openstack/nova-cell1-db-create-wmjhx" Oct 06 21:50:28 crc kubenswrapper[5014]: I1006 21:50:28.909286 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-ptmjb" Oct 06 21:50:29 crc kubenswrapper[5014]: I1006 21:50:29.050797 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-wmjhx" Oct 06 21:50:29 crc kubenswrapper[5014]: I1006 21:50:29.405826 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-55rwp"] Oct 06 21:50:29 crc kubenswrapper[5014]: W1006 21:50:29.424102 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcaa95dd3_2ffa_4b18_b7e9_ad4075b25304.slice/crio-cd88e077cb2fb36ce62e49a7370ae62bc92a9434f1946f0dcec915a44744fd01 WatchSource:0}: Error finding container cd88e077cb2fb36ce62e49a7370ae62bc92a9434f1946f0dcec915a44744fd01: Status 404 returned error can't find the container with id cd88e077cb2fb36ce62e49a7370ae62bc92a9434f1946f0dcec915a44744fd01 Oct 06 21:50:29 crc kubenswrapper[5014]: I1006 21:50:29.436370 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-ptmjb"] Oct 06 21:50:29 crc kubenswrapper[5014]: I1006 21:50:29.682421 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-wmjhx"] Oct 06 21:50:29 crc kubenswrapper[5014]: I1006 21:50:29.812819 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6387bac-c8e9-49cb-9d8a-63f0e96dd160","Type":"ContainerStarted","Data":"76acb8e7af3bc5886eab063ad4a4466b7e35c9e611b55cc721c901feafc9ebcc"} Oct 06 21:50:29 crc kubenswrapper[5014]: I1006 21:50:29.814438 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-wmjhx" event={"ID":"347f9b08-ba1b-4065-be89-4a13a0f36b23","Type":"ContainerStarted","Data":"5c630c9698cf0a3da878f6cacc6e223ccc0942b343a01cf64512321603580460"} Oct 06 21:50:29 crc kubenswrapper[5014]: I1006 21:50:29.817963 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-ptmjb" event={"ID":"17a9c7b6-c877-4057-86ee-13d1ee4f9515","Type":"ContainerStarted","Data":"160fc036b53fde699bc900f96f91e07f236ff55c5d39308a474b175d76ca1bff"} Oct 06 21:50:29 crc kubenswrapper[5014]: I1006 21:50:29.818022 5014 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-ptmjb" event={"ID":"17a9c7b6-c877-4057-86ee-13d1ee4f9515","Type":"ContainerStarted","Data":"fe683613114323f383e903dfaf93a1627604bc692d86b0f78746425fd3881b09"} Oct 06 21:50:29 crc kubenswrapper[5014]: I1006 21:50:29.824753 5014 generic.go:334] "Generic (PLEG): container finished" podID="caa95dd3-2ffa-4b18-b7e9-ad4075b25304" containerID="cade81ead119d9b5f5ca886d347c02f3c9e6e5e57f2785ee7fe756b12f2fc74f" exitCode=0 Oct 06 21:50:29 crc kubenswrapper[5014]: I1006 21:50:29.824819 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-55rwp" event={"ID":"caa95dd3-2ffa-4b18-b7e9-ad4075b25304","Type":"ContainerDied","Data":"cade81ead119d9b5f5ca886d347c02f3c9e6e5e57f2785ee7fe756b12f2fc74f"} Oct 06 21:50:29 crc kubenswrapper[5014]: I1006 21:50:29.824858 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-55rwp" event={"ID":"caa95dd3-2ffa-4b18-b7e9-ad4075b25304","Type":"ContainerStarted","Data":"cd88e077cb2fb36ce62e49a7370ae62bc92a9434f1946f0dcec915a44744fd01"} Oct 06 21:50:29 crc kubenswrapper[5014]: I1006 21:50:29.842252 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-ptmjb" podStartSLOduration=1.842223462 podStartE2EDuration="1.842223462s" podCreationTimestamp="2025-10-06 21:50:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:50:29.832443892 +0000 UTC m=+1175.125480646" watchObservedRunningTime="2025-10-06 21:50:29.842223462 +0000 UTC m=+1175.135260186" Oct 06 21:50:30 crc kubenswrapper[5014]: I1006 21:50:30.847914 5014 generic.go:334] "Generic (PLEG): container finished" podID="17a9c7b6-c877-4057-86ee-13d1ee4f9515" containerID="160fc036b53fde699bc900f96f91e07f236ff55c5d39308a474b175d76ca1bff" exitCode=0 Oct 06 21:50:30 crc kubenswrapper[5014]: I1006 21:50:30.848056 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-ptmjb" event={"ID":"17a9c7b6-c877-4057-86ee-13d1ee4f9515","Type":"ContainerDied","Data":"160fc036b53fde699bc900f96f91e07f236ff55c5d39308a474b175d76ca1bff"} Oct 06 21:50:30 crc kubenswrapper[5014]: I1006 21:50:30.856299 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6387bac-c8e9-49cb-9d8a-63f0e96dd160","Type":"ContainerStarted","Data":"a65573ed849508b5da0971e13fbafa88e19e1900dfca2b38a5c925ae3c332490"} Oct 06 21:50:30 crc kubenswrapper[5014]: I1006 21:50:30.858466 5014 generic.go:334] "Generic (PLEG): container finished" podID="347f9b08-ba1b-4065-be89-4a13a0f36b23" containerID="27e08919fc63e9cbfab920a4f7742075cce87a6cd89ec57c9bd8e23ee7770c82" exitCode=0 Oct 06 21:50:30 crc kubenswrapper[5014]: I1006 21:50:30.858551 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-wmjhx" event={"ID":"347f9b08-ba1b-4065-be89-4a13a0f36b23","Type":"ContainerDied","Data":"27e08919fc63e9cbfab920a4f7742075cce87a6cd89ec57c9bd8e23ee7770c82"} Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.253942 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-55rwp" Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.274848 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.274920 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.327441 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.334120 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.411993 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqw2c\" (UniqueName: \"kubernetes.io/projected/caa95dd3-2ffa-4b18-b7e9-ad4075b25304-kube-api-access-pqw2c\") pod \"caa95dd3-2ffa-4b18-b7e9-ad4075b25304\" (UID: \"caa95dd3-2ffa-4b18-b7e9-ad4075b25304\") " Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.418138 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/caa95dd3-2ffa-4b18-b7e9-ad4075b25304-kube-api-access-pqw2c" (OuterVolumeSpecName: "kube-api-access-pqw2c") pod "caa95dd3-2ffa-4b18-b7e9-ad4075b25304" (UID: "caa95dd3-2ffa-4b18-b7e9-ad4075b25304"). InnerVolumeSpecName "kube-api-access-pqw2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.519420 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqw2c\" (UniqueName: \"kubernetes.io/projected/caa95dd3-2ffa-4b18-b7e9-ad4075b25304-kube-api-access-pqw2c\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.872121 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6387bac-c8e9-49cb-9d8a-63f0e96dd160","Type":"ContainerStarted","Data":"577043a0175a0d75187f581285a46c4bab34bd3a54ff1806d439c7d27fa95b6d"} Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.872678 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.874532 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-55rwp" event={"ID":"caa95dd3-2ffa-4b18-b7e9-ad4075b25304","Type":"ContainerDied","Data":"cd88e077cb2fb36ce62e49a7370ae62bc92a9434f1946f0dcec915a44744fd01"} Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.874571 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd88e077cb2fb36ce62e49a7370ae62bc92a9434f1946f0dcec915a44744fd01" Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.874595 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-55rwp" Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.875447 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.875472 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Oct 06 21:50:31 crc kubenswrapper[5014]: I1006 21:50:31.920963 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.466384944 podStartE2EDuration="4.92094302s" podCreationTimestamp="2025-10-06 21:50:27 +0000 UTC" firstStartedPulling="2025-10-06 21:50:27.930893055 +0000 UTC m=+1173.223929789" lastFinishedPulling="2025-10-06 21:50:31.385451131 +0000 UTC m=+1176.678487865" observedRunningTime="2025-10-06 21:50:31.916504709 +0000 UTC m=+1177.209541463" watchObservedRunningTime="2025-10-06 21:50:31.92094302 +0000 UTC m=+1177.213979764" Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.321187 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.321254 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.363435 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.367797 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.373713 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-ptmjb" Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.379568 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-wmjhx" Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.539357 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rv8m2\" (UniqueName: \"kubernetes.io/projected/17a9c7b6-c877-4057-86ee-13d1ee4f9515-kube-api-access-rv8m2\") pod \"17a9c7b6-c877-4057-86ee-13d1ee4f9515\" (UID: \"17a9c7b6-c877-4057-86ee-13d1ee4f9515\") " Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.539662 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-znxxg\" (UniqueName: \"kubernetes.io/projected/347f9b08-ba1b-4065-be89-4a13a0f36b23-kube-api-access-znxxg\") pod \"347f9b08-ba1b-4065-be89-4a13a0f36b23\" (UID: \"347f9b08-ba1b-4065-be89-4a13a0f36b23\") " Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.544781 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/347f9b08-ba1b-4065-be89-4a13a0f36b23-kube-api-access-znxxg" (OuterVolumeSpecName: "kube-api-access-znxxg") pod "347f9b08-ba1b-4065-be89-4a13a0f36b23" (UID: "347f9b08-ba1b-4065-be89-4a13a0f36b23"). InnerVolumeSpecName "kube-api-access-znxxg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.546371 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17a9c7b6-c877-4057-86ee-13d1ee4f9515-kube-api-access-rv8m2" (OuterVolumeSpecName: "kube-api-access-rv8m2") pod "17a9c7b6-c877-4057-86ee-13d1ee4f9515" (UID: "17a9c7b6-c877-4057-86ee-13d1ee4f9515"). InnerVolumeSpecName "kube-api-access-rv8m2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.641867 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rv8m2\" (UniqueName: \"kubernetes.io/projected/17a9c7b6-c877-4057-86ee-13d1ee4f9515-kube-api-access-rv8m2\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.641904 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-znxxg\" (UniqueName: \"kubernetes.io/projected/347f9b08-ba1b-4065-be89-4a13a0f36b23-kube-api-access-znxxg\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.901234 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-wmjhx" Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.903721 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-wmjhx" event={"ID":"347f9b08-ba1b-4065-be89-4a13a0f36b23","Type":"ContainerDied","Data":"5c630c9698cf0a3da878f6cacc6e223ccc0942b343a01cf64512321603580460"} Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.903776 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c630c9698cf0a3da878f6cacc6e223ccc0942b343a01cf64512321603580460" Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.906649 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-ptmjb"
Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.906632 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-ptmjb" event={"ID":"17a9c7b6-c877-4057-86ee-13d1ee4f9515","Type":"ContainerDied","Data":"fe683613114323f383e903dfaf93a1627604bc692d86b0f78746425fd3881b09"}
Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.906841 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe683613114323f383e903dfaf93a1627604bc692d86b0f78746425fd3881b09"
Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.908113 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:32 crc kubenswrapper[5014]: I1006 21:50:32.908206 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:33 crc kubenswrapper[5014]: I1006 21:50:33.906761 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Oct 06 21:50:33 crc kubenswrapper[5014]: I1006 21:50:33.918577 5014 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Oct 06 21:50:34 crc kubenswrapper[5014]: I1006 21:50:34.171690 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Oct 06 21:50:35 crc kubenswrapper[5014]: I1006 21:50:35.188534 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:35 crc kubenswrapper[5014]: I1006 21:50:35.190492 5014 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Oct 06 21:50:35 crc kubenswrapper[5014]: I1006 21:50:35.266488 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.734475 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-4b16-account-create-qrrfb"]
Oct 06 21:50:38 crc kubenswrapper[5014]: E1006 21:50:38.735903 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caa95dd3-2ffa-4b18-b7e9-ad4075b25304" containerName="mariadb-database-create"
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.735933 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="caa95dd3-2ffa-4b18-b7e9-ad4075b25304" containerName="mariadb-database-create"
Oct 06 21:50:38 crc kubenswrapper[5014]: E1006 21:50:38.735973 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17a9c7b6-c877-4057-86ee-13d1ee4f9515" containerName="mariadb-database-create"
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.735987 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="17a9c7b6-c877-4057-86ee-13d1ee4f9515" containerName="mariadb-database-create"
Oct 06 21:50:38 crc kubenswrapper[5014]: E1006 21:50:38.736021 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="347f9b08-ba1b-4065-be89-4a13a0f36b23" containerName="mariadb-database-create"
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.736040 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="347f9b08-ba1b-4065-be89-4a13a0f36b23" containerName="mariadb-database-create"
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.736382 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="caa95dd3-2ffa-4b18-b7e9-ad4075b25304" containerName="mariadb-database-create"
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.736424 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="17a9c7b6-c877-4057-86ee-13d1ee4f9515" containerName="mariadb-database-create"
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.736448 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="347f9b08-ba1b-4065-be89-4a13a0f36b23" containerName="mariadb-database-create"
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.737940 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-4b16-account-create-qrrfb"
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.740556 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.748224 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-4b16-account-create-qrrfb"]
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.774677 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cw5h\" (UniqueName: \"kubernetes.io/projected/68ae1bd5-0dc8-4d02-b060-421ad434a8bd-kube-api-access-2cw5h\") pod \"nova-api-4b16-account-create-qrrfb\" (UID: \"68ae1bd5-0dc8-4d02-b060-421ad434a8bd\") " pod="openstack/nova-api-4b16-account-create-qrrfb"
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.876694 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cw5h\" (UniqueName: \"kubernetes.io/projected/68ae1bd5-0dc8-4d02-b060-421ad434a8bd-kube-api-access-2cw5h\") pod \"nova-api-4b16-account-create-qrrfb\" (UID: \"68ae1bd5-0dc8-4d02-b060-421ad434a8bd\") " pod="openstack/nova-api-4b16-account-create-qrrfb"
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.915339 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cw5h\" (UniqueName: \"kubernetes.io/projected/68ae1bd5-0dc8-4d02-b060-421ad434a8bd-kube-api-access-2cw5h\") pod \"nova-api-4b16-account-create-qrrfb\" (UID: \"68ae1bd5-0dc8-4d02-b060-421ad434a8bd\") " pod="openstack/nova-api-4b16-account-create-qrrfb"
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.923992 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-ce1b-account-create-cjdcf"]
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.925420 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ce1b-account-create-cjdcf"
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.928652 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.949105 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-ce1b-account-create-cjdcf"]
Oct 06 21:50:38 crc kubenswrapper[5014]: I1006 21:50:38.978131 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9hnb\" (UniqueName: \"kubernetes.io/projected/ff881d46-e8a9-4eb9-a009-faffb59e898b-kube-api-access-d9hnb\") pod \"nova-cell0-ce1b-account-create-cjdcf\" (UID: \"ff881d46-e8a9-4eb9-a009-faffb59e898b\") " pod="openstack/nova-cell0-ce1b-account-create-cjdcf"
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.071644 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-4b16-account-create-qrrfb"
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.082733 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9hnb\" (UniqueName: \"kubernetes.io/projected/ff881d46-e8a9-4eb9-a009-faffb59e898b-kube-api-access-d9hnb\") pod \"nova-cell0-ce1b-account-create-cjdcf\" (UID: \"ff881d46-e8a9-4eb9-a009-faffb59e898b\") " pod="openstack/nova-cell0-ce1b-account-create-cjdcf"
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.122440 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-b5d1-account-create-sl9xc"]
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.125973 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b5d1-account-create-sl9xc"
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.127852 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9hnb\" (UniqueName: \"kubernetes.io/projected/ff881d46-e8a9-4eb9-a009-faffb59e898b-kube-api-access-d9hnb\") pod \"nova-cell0-ce1b-account-create-cjdcf\" (UID: \"ff881d46-e8a9-4eb9-a009-faffb59e898b\") " pod="openstack/nova-cell0-ce1b-account-create-cjdcf"
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.128051 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.143340 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-b5d1-account-create-sl9xc"]
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.184598 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9p2gm\" (UniqueName: \"kubernetes.io/projected/56178220-15bb-4fed-9fcf-0f6e34ffeb3e-kube-api-access-9p2gm\") pod \"nova-cell1-b5d1-account-create-sl9xc\" (UID: \"56178220-15bb-4fed-9fcf-0f6e34ffeb3e\") " pod="openstack/nova-cell1-b5d1-account-create-sl9xc"
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.287258 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9p2gm\" (UniqueName: \"kubernetes.io/projected/56178220-15bb-4fed-9fcf-0f6e34ffeb3e-kube-api-access-9p2gm\") pod \"nova-cell1-b5d1-account-create-sl9xc\" (UID: \"56178220-15bb-4fed-9fcf-0f6e34ffeb3e\") " pod="openstack/nova-cell1-b5d1-account-create-sl9xc"
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.291161 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ce1b-account-create-cjdcf"
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.318387 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9p2gm\" (UniqueName: \"kubernetes.io/projected/56178220-15bb-4fed-9fcf-0f6e34ffeb3e-kube-api-access-9p2gm\") pod \"nova-cell1-b5d1-account-create-sl9xc\" (UID: \"56178220-15bb-4fed-9fcf-0f6e34ffeb3e\") " pod="openstack/nova-cell1-b5d1-account-create-sl9xc"
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.569156 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b5d1-account-create-sl9xc"
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.651439 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-4b16-account-create-qrrfb"]
Oct 06 21:50:39 crc kubenswrapper[5014]: W1006 21:50:39.665773 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod68ae1bd5_0dc8_4d02_b060_421ad434a8bd.slice/crio-0a95fc52e74cc7ccc7e3448b302cbdf0a81a508694d494729b956ec232d7c698 WatchSource:0}: Error finding container 0a95fc52e74cc7ccc7e3448b302cbdf0a81a508694d494729b956ec232d7c698: Status 404 returned error can't find the container with id 0a95fc52e74cc7ccc7e3448b302cbdf0a81a508694d494729b956ec232d7c698
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.853431 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-ce1b-account-create-cjdcf"]
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.973213 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ce1b-account-create-cjdcf" event={"ID":"ff881d46-e8a9-4eb9-a009-faffb59e898b","Type":"ContainerStarted","Data":"8db3886b466a63f106df175e565303ce2ba6c968d6a8bce52702b92d89d7a574"}
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.974819 5014 generic.go:334] "Generic (PLEG): container finished" podID="68ae1bd5-0dc8-4d02-b060-421ad434a8bd" containerID="c724c6629b1de99ef2ecd3c56620586eab0b64f00960198734d644642c218918" exitCode=0
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.974861 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-4b16-account-create-qrrfb" event={"ID":"68ae1bd5-0dc8-4d02-b060-421ad434a8bd","Type":"ContainerDied","Data":"c724c6629b1de99ef2ecd3c56620586eab0b64f00960198734d644642c218918"}
Oct 06 21:50:39 crc kubenswrapper[5014]: I1006 21:50:39.974885 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-4b16-account-create-qrrfb" event={"ID":"68ae1bd5-0dc8-4d02-b060-421ad434a8bd","Type":"ContainerStarted","Data":"0a95fc52e74cc7ccc7e3448b302cbdf0a81a508694d494729b956ec232d7c698"}
Oct 06 21:50:40 crc kubenswrapper[5014]: I1006 21:50:40.051200 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-b5d1-account-create-sl9xc"]
Oct 06 21:50:40 crc kubenswrapper[5014]: W1006 21:50:40.070147 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod56178220_15bb_4fed_9fcf_0f6e34ffeb3e.slice/crio-e762d0be3bc29ba9a8ba64f848a7166b7d78e84e7e14b042db91288f904c3aa9 WatchSource:0}: Error finding container e762d0be3bc29ba9a8ba64f848a7166b7d78e84e7e14b042db91288f904c3aa9: Status 404 returned error can't find the container with id e762d0be3bc29ba9a8ba64f848a7166b7d78e84e7e14b042db91288f904c3aa9
Oct 06 21:50:40 crc kubenswrapper[5014]: I1006 21:50:40.994502 5014 generic.go:334] "Generic (PLEG): container finished" podID="56178220-15bb-4fed-9fcf-0f6e34ffeb3e" containerID="f275100bad062a3e3dac1f99ba18a85645c9e3a83e238cb17a68f9c0a6030257" exitCode=0
Oct 06 21:50:40 crc kubenswrapper[5014]: I1006 21:50:40.995638 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b5d1-account-create-sl9xc" event={"ID":"56178220-15bb-4fed-9fcf-0f6e34ffeb3e","Type":"ContainerDied","Data":"f275100bad062a3e3dac1f99ba18a85645c9e3a83e238cb17a68f9c0a6030257"}
Oct 06 21:50:40 crc kubenswrapper[5014]: I1006 21:50:40.996000 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b5d1-account-create-sl9xc" event={"ID":"56178220-15bb-4fed-9fcf-0f6e34ffeb3e","Type":"ContainerStarted","Data":"e762d0be3bc29ba9a8ba64f848a7166b7d78e84e7e14b042db91288f904c3aa9"}
Oct 06 21:50:40 crc kubenswrapper[5014]: I1006 21:50:40.998293 5014 generic.go:334] "Generic (PLEG): container finished" podID="ff881d46-e8a9-4eb9-a009-faffb59e898b" containerID="1f2fab62bca88836e6746c95b9eff65328fee516558f263d1cb4288d17877aac" exitCode=0
Oct 06 21:50:40 crc kubenswrapper[5014]: I1006 21:50:40.998779 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ce1b-account-create-cjdcf" event={"ID":"ff881d46-e8a9-4eb9-a009-faffb59e898b","Type":"ContainerDied","Data":"1f2fab62bca88836e6746c95b9eff65328fee516558f263d1cb4288d17877aac"}
Oct 06 21:50:41 crc kubenswrapper[5014]: I1006 21:50:41.339504 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-4b16-account-create-qrrfb"
Oct 06 21:50:41 crc kubenswrapper[5014]: I1006 21:50:41.429945 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2cw5h\" (UniqueName: \"kubernetes.io/projected/68ae1bd5-0dc8-4d02-b060-421ad434a8bd-kube-api-access-2cw5h\") pod \"68ae1bd5-0dc8-4d02-b060-421ad434a8bd\" (UID: \"68ae1bd5-0dc8-4d02-b060-421ad434a8bd\") "
Oct 06 21:50:41 crc kubenswrapper[5014]: I1006 21:50:41.437975 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68ae1bd5-0dc8-4d02-b060-421ad434a8bd-kube-api-access-2cw5h" (OuterVolumeSpecName: "kube-api-access-2cw5h") pod "68ae1bd5-0dc8-4d02-b060-421ad434a8bd" (UID: "68ae1bd5-0dc8-4d02-b060-421ad434a8bd"). InnerVolumeSpecName "kube-api-access-2cw5h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:50:41 crc kubenswrapper[5014]: I1006 21:50:41.498673 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Oct 06 21:50:41 crc kubenswrapper[5014]: I1006 21:50:41.499011 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="ceilometer-central-agent" containerID="cri-o://49b1d0cd5b177d27281bab37406f44b4ae853851e2eabfc355491c71f0d0559c" gracePeriod=30
Oct 06 21:50:41 crc kubenswrapper[5014]: I1006 21:50:41.499160 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="proxy-httpd" containerID="cri-o://577043a0175a0d75187f581285a46c4bab34bd3a54ff1806d439c7d27fa95b6d" gracePeriod=30
Oct 06 21:50:41 crc kubenswrapper[5014]: I1006 21:50:41.499310 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="sg-core" containerID="cri-o://a65573ed849508b5da0971e13fbafa88e19e1900dfca2b38a5c925ae3c332490" gracePeriod=30
Oct 06 21:50:41 crc kubenswrapper[5014]: I1006 21:50:41.499335 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="ceilometer-notification-agent" containerID="cri-o://76acb8e7af3bc5886eab063ad4a4466b7e35c9e611b55cc721c901feafc9ebcc" gracePeriod=30
Oct 06 21:50:41 crc kubenswrapper[5014]: I1006 21:50:41.531929 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2cw5h\" (UniqueName: \"kubernetes.io/projected/68ae1bd5-0dc8-4d02-b060-421ad434a8bd-kube-api-access-2cw5h\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:41 crc kubenswrapper[5014]: I1006 21:50:41.615660 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.173:3000/\": read tcp 10.217.0.2:34952->10.217.0.173:3000: read: connection reset by peer"
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.012338 5014 generic.go:334] "Generic (PLEG): container finished" podID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerID="577043a0175a0d75187f581285a46c4bab34bd3a54ff1806d439c7d27fa95b6d" exitCode=0
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.013994 5014 generic.go:334] "Generic (PLEG): container finished" podID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerID="a65573ed849508b5da0971e13fbafa88e19e1900dfca2b38a5c925ae3c332490" exitCode=2
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.014045 5014 generic.go:334] "Generic (PLEG): container finished" podID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerID="49b1d0cd5b177d27281bab37406f44b4ae853851e2eabfc355491c71f0d0559c" exitCode=0
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.012383 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6387bac-c8e9-49cb-9d8a-63f0e96dd160","Type":"ContainerDied","Data":"577043a0175a0d75187f581285a46c4bab34bd3a54ff1806d439c7d27fa95b6d"}
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.014111 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6387bac-c8e9-49cb-9d8a-63f0e96dd160","Type":"ContainerDied","Data":"a65573ed849508b5da0971e13fbafa88e19e1900dfca2b38a5c925ae3c332490"}
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.014131 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6387bac-c8e9-49cb-9d8a-63f0e96dd160","Type":"ContainerDied","Data":"49b1d0cd5b177d27281bab37406f44b4ae853851e2eabfc355491c71f0d0559c"}
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.017003 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-4b16-account-create-qrrfb" event={"ID":"68ae1bd5-0dc8-4d02-b060-421ad434a8bd","Type":"ContainerDied","Data":"0a95fc52e74cc7ccc7e3448b302cbdf0a81a508694d494729b956ec232d7c698"}
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.017184 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a95fc52e74cc7ccc7e3448b302cbdf0a81a508694d494729b956ec232d7c698"
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.017079 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-4b16-account-create-qrrfb"
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.447736 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ce1b-account-create-cjdcf"
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.493948 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b5d1-account-create-sl9xc"
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.496378 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.549644 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-run-httpd\") pod \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") "
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.549713 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lx62s\" (UniqueName: \"kubernetes.io/projected/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-kube-api-access-lx62s\") pod \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") "
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.549764 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-sg-core-conf-yaml\") pod \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") "
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.549790 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9hnb\" (UniqueName: \"kubernetes.io/projected/ff881d46-e8a9-4eb9-a009-faffb59e898b-kube-api-access-d9hnb\") pod \"ff881d46-e8a9-4eb9-a009-faffb59e898b\" (UID: \"ff881d46-e8a9-4eb9-a009-faffb59e898b\") "
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.549823 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-combined-ca-bundle\") pod \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") "
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.549854 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9p2gm\" (UniqueName: \"kubernetes.io/projected/56178220-15bb-4fed-9fcf-0f6e34ffeb3e-kube-api-access-9p2gm\") pod \"56178220-15bb-4fed-9fcf-0f6e34ffeb3e\" (UID: \"56178220-15bb-4fed-9fcf-0f6e34ffeb3e\") "
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.549877 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-config-data\") pod \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") "
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.549904 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-scripts\") pod \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") "
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.549926 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-log-httpd\") pod \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\" (UID: \"c6387bac-c8e9-49cb-9d8a-63f0e96dd160\") "
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.551245 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c6387bac-c8e9-49cb-9d8a-63f0e96dd160" (UID: "c6387bac-c8e9-49cb-9d8a-63f0e96dd160"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.551952 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c6387bac-c8e9-49cb-9d8a-63f0e96dd160" (UID: "c6387bac-c8e9-49cb-9d8a-63f0e96dd160"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.554793 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff881d46-e8a9-4eb9-a009-faffb59e898b-kube-api-access-d9hnb" (OuterVolumeSpecName: "kube-api-access-d9hnb") pod "ff881d46-e8a9-4eb9-a009-faffb59e898b" (UID: "ff881d46-e8a9-4eb9-a009-faffb59e898b"). InnerVolumeSpecName "kube-api-access-d9hnb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.555521 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-kube-api-access-lx62s" (OuterVolumeSpecName: "kube-api-access-lx62s") pod "c6387bac-c8e9-49cb-9d8a-63f0e96dd160" (UID: "c6387bac-c8e9-49cb-9d8a-63f0e96dd160"). InnerVolumeSpecName "kube-api-access-lx62s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.555590 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56178220-15bb-4fed-9fcf-0f6e34ffeb3e-kube-api-access-9p2gm" (OuterVolumeSpecName: "kube-api-access-9p2gm") pod "56178220-15bb-4fed-9fcf-0f6e34ffeb3e" (UID: "56178220-15bb-4fed-9fcf-0f6e34ffeb3e"). InnerVolumeSpecName "kube-api-access-9p2gm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.563531 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-scripts" (OuterVolumeSpecName: "scripts") pod "c6387bac-c8e9-49cb-9d8a-63f0e96dd160" (UID: "c6387bac-c8e9-49cb-9d8a-63f0e96dd160"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.577545 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c6387bac-c8e9-49cb-9d8a-63f0e96dd160" (UID: "c6387bac-c8e9-49cb-9d8a-63f0e96dd160"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.617752 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c6387bac-c8e9-49cb-9d8a-63f0e96dd160" (UID: "c6387bac-c8e9-49cb-9d8a-63f0e96dd160"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.649922 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-config-data" (OuterVolumeSpecName: "config-data") pod "c6387bac-c8e9-49cb-9d8a-63f0e96dd160" (UID: "c6387bac-c8e9-49cb-9d8a-63f0e96dd160"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.652006 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lx62s\" (UniqueName: \"kubernetes.io/projected/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-kube-api-access-lx62s\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.652032 5014 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.652070 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9hnb\" (UniqueName: \"kubernetes.io/projected/ff881d46-e8a9-4eb9-a009-faffb59e898b-kube-api-access-d9hnb\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.652082 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.652093 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9p2gm\" (UniqueName: \"kubernetes.io/projected/56178220-15bb-4fed-9fcf-0f6e34ffeb3e-kube-api-access-9p2gm\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.652104 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-config-data\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.652140 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-scripts\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.652155 5014 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-log-httpd\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:42 crc kubenswrapper[5014]: I1006 21:50:42.652167 5014 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6387bac-c8e9-49cb-9d8a-63f0e96dd160-run-httpd\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.027947 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b5d1-account-create-sl9xc"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.027978 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b5d1-account-create-sl9xc" event={"ID":"56178220-15bb-4fed-9fcf-0f6e34ffeb3e","Type":"ContainerDied","Data":"e762d0be3bc29ba9a8ba64f848a7166b7d78e84e7e14b042db91288f904c3aa9"}
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.028382 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e762d0be3bc29ba9a8ba64f848a7166b7d78e84e7e14b042db91288f904c3aa9"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.031183 5014 generic.go:334] "Generic (PLEG): container finished" podID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerID="76acb8e7af3bc5886eab063ad4a4466b7e35c9e611b55cc721c901feafc9ebcc" exitCode=0
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.031221 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6387bac-c8e9-49cb-9d8a-63f0e96dd160","Type":"ContainerDied","Data":"76acb8e7af3bc5886eab063ad4a4466b7e35c9e611b55cc721c901feafc9ebcc"}
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.031276 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6387bac-c8e9-49cb-9d8a-63f0e96dd160","Type":"ContainerDied","Data":"2fe9dc971ec389052b0c0f0cdd07dab733e40f521e0a2e10c5efcde2ae644349"}
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.031316 5014 scope.go:117] "RemoveContainer" containerID="577043a0175a0d75187f581285a46c4bab34bd3a54ff1806d439c7d27fa95b6d"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.031828 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.033529 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ce1b-account-create-cjdcf" event={"ID":"ff881d46-e8a9-4eb9-a009-faffb59e898b","Type":"ContainerDied","Data":"8db3886b466a63f106df175e565303ce2ba6c968d6a8bce52702b92d89d7a574"}
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.033570 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8db3886b466a63f106df175e565303ce2ba6c968d6a8bce52702b92d89d7a574"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.033681 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ce1b-account-create-cjdcf"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.058241 5014 scope.go:117] "RemoveContainer" containerID="a65573ed849508b5da0971e13fbafa88e19e1900dfca2b38a5c925ae3c332490"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.087395 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.104387 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.107807 5014 scope.go:117] "RemoveContainer" containerID="76acb8e7af3bc5886eab063ad4a4466b7e35c9e611b55cc721c901feafc9ebcc"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.135551 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Oct 06 21:50:43 crc kubenswrapper[5014]: E1006 21:50:43.136082 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff881d46-e8a9-4eb9-a009-faffb59e898b" containerName="mariadb-account-create"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.136105 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff881d46-e8a9-4eb9-a009-faffb59e898b" containerName="mariadb-account-create"
Oct 06 21:50:43 crc kubenswrapper[5014]: E1006 21:50:43.136119 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68ae1bd5-0dc8-4d02-b060-421ad434a8bd" containerName="mariadb-account-create"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.136129 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="68ae1bd5-0dc8-4d02-b060-421ad434a8bd" containerName="mariadb-account-create"
Oct 06 21:50:43 crc kubenswrapper[5014]: E1006 21:50:43.136150 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="ceilometer-notification-agent"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.136159 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="ceilometer-notification-agent"
Oct 06 21:50:43 crc kubenswrapper[5014]: E1006 21:50:43.136169 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="proxy-httpd"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.136177 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="proxy-httpd"
Oct 06 21:50:43 crc kubenswrapper[5014]: E1006 21:50:43.136201 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="ceilometer-central-agent"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.136212 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="ceilometer-central-agent"
Oct 06 21:50:43 crc kubenswrapper[5014]: E1006 21:50:43.136224 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="sg-core"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.136232 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="sg-core"
Oct 06 21:50:43 crc kubenswrapper[5014]: E1006 21:50:43.136245 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56178220-15bb-4fed-9fcf-0f6e34ffeb3e" containerName="mariadb-account-create"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.136253 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="56178220-15bb-4fed-9fcf-0f6e34ffeb3e" containerName="mariadb-account-create"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.136476 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="68ae1bd5-0dc8-4d02-b060-421ad434a8bd" containerName="mariadb-account-create"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.136493 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="proxy-httpd"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.136514 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="56178220-15bb-4fed-9fcf-0f6e34ffeb3e" containerName="mariadb-account-create"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.136523 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff881d46-e8a9-4eb9-a009-faffb59e898b" containerName="mariadb-account-create"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.136538 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="ceilometer-notification-agent"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.136554 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="sg-core"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.136564 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" containerName="ceilometer-central-agent"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.138765 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.142941 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.143053 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.148717 5014 scope.go:117] "RemoveContainer" containerID="49b1d0cd5b177d27281bab37406f44b4ae853851e2eabfc355491c71f0d0559c"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.163768 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p75z8\" (UniqueName: \"kubernetes.io/projected/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-kube-api-access-p75z8\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.163862 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.163901 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.163941 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-config-data\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.163974 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-scripts\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.164060 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-log-httpd\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.164088 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-run-httpd\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.167232 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.176457 5014 scope.go:117] "RemoveContainer" containerID="577043a0175a0d75187f581285a46c4bab34bd3a54ff1806d439c7d27fa95b6d"
Oct 06 21:50:43 crc kubenswrapper[5014]: E1006 21:50:43.177022 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"577043a0175a0d75187f581285a46c4bab34bd3a54ff1806d439c7d27fa95b6d\": container with ID starting with 577043a0175a0d75187f581285a46c4bab34bd3a54ff1806d439c7d27fa95b6d not found: ID does not exist" containerID="577043a0175a0d75187f581285a46c4bab34bd3a54ff1806d439c7d27fa95b6d"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.177065 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"577043a0175a0d75187f581285a46c4bab34bd3a54ff1806d439c7d27fa95b6d"} err="failed to get container status \"577043a0175a0d75187f581285a46c4bab34bd3a54ff1806d439c7d27fa95b6d\": rpc error: code = NotFound desc = could not find container \"577043a0175a0d75187f581285a46c4bab34bd3a54ff1806d439c7d27fa95b6d\": container with ID starting with 577043a0175a0d75187f581285a46c4bab34bd3a54ff1806d439c7d27fa95b6d not found: ID does not exist"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.177095 5014 scope.go:117] "RemoveContainer" containerID="a65573ed849508b5da0971e13fbafa88e19e1900dfca2b38a5c925ae3c332490"
Oct 06 21:50:43 crc kubenswrapper[5014]: E1006 21:50:43.177383 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a65573ed849508b5da0971e13fbafa88e19e1900dfca2b38a5c925ae3c332490\": container with ID starting with a65573ed849508b5da0971e13fbafa88e19e1900dfca2b38a5c925ae3c332490 not found: ID does not exist" containerID="a65573ed849508b5da0971e13fbafa88e19e1900dfca2b38a5c925ae3c332490"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.177407 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a65573ed849508b5da0971e13fbafa88e19e1900dfca2b38a5c925ae3c332490"} err="failed to get container status \"a65573ed849508b5da0971e13fbafa88e19e1900dfca2b38a5c925ae3c332490\": rpc error: code = NotFound desc = could not find container \"a65573ed849508b5da0971e13fbafa88e19e1900dfca2b38a5c925ae3c332490\": container with ID starting with a65573ed849508b5da0971e13fbafa88e19e1900dfca2b38a5c925ae3c332490 not found: ID does not exist"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.177420 5014 scope.go:117] "RemoveContainer" containerID="76acb8e7af3bc5886eab063ad4a4466b7e35c9e611b55cc721c901feafc9ebcc"
Oct 06 21:50:43 crc kubenswrapper[5014]: E1006 21:50:43.177591 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76acb8e7af3bc5886eab063ad4a4466b7e35c9e611b55cc721c901feafc9ebcc\": container with ID starting with 76acb8e7af3bc5886eab063ad4a4466b7e35c9e611b55cc721c901feafc9ebcc not found: ID does not exist" containerID="76acb8e7af3bc5886eab063ad4a4466b7e35c9e611b55cc721c901feafc9ebcc"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.177610 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76acb8e7af3bc5886eab063ad4a4466b7e35c9e611b55cc721c901feafc9ebcc"} err="failed to get container status \"76acb8e7af3bc5886eab063ad4a4466b7e35c9e611b55cc721c901feafc9ebcc\": rpc error: code = NotFound desc = could not find container \"76acb8e7af3bc5886eab063ad4a4466b7e35c9e611b55cc721c901feafc9ebcc\": container with ID starting with 76acb8e7af3bc5886eab063ad4a4466b7e35c9e611b55cc721c901feafc9ebcc not found: ID does not exist"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.177635 5014 scope.go:117] "RemoveContainer" containerID="49b1d0cd5b177d27281bab37406f44b4ae853851e2eabfc355491c71f0d0559c"
Oct 06 21:50:43 crc kubenswrapper[5014]: E1006 21:50:43.177800 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49b1d0cd5b177d27281bab37406f44b4ae853851e2eabfc355491c71f0d0559c\": container with ID starting with 49b1d0cd5b177d27281bab37406f44b4ae853851e2eabfc355491c71f0d0559c not found: ID does not exist" containerID="49b1d0cd5b177d27281bab37406f44b4ae853851e2eabfc355491c71f0d0559c"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.177819 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49b1d0cd5b177d27281bab37406f44b4ae853851e2eabfc355491c71f0d0559c"} err="failed to get container status \"49b1d0cd5b177d27281bab37406f44b4ae853851e2eabfc355491c71f0d0559c\": rpc error: code = NotFound desc = could not find container \"49b1d0cd5b177d27281bab37406f44b4ae853851e2eabfc355491c71f0d0559c\": container with ID starting with 49b1d0cd5b177d27281bab37406f44b4ae853851e2eabfc355491c71f0d0559c not found: ID does not exist"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.265742 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-config-data\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.265800 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-scripts\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.265886 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-log-httpd\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.265921 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-run-httpd\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.265959 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p75z8\" (UniqueName: \"kubernetes.io/projected/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-kube-api-access-p75z8\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.266023 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.266058 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.266739 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-run-httpd\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.267001 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-log-httpd\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.271442 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-config-data\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.272010 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.272352 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-scripts\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.280887 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.282546 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p75z8\" (UniqueName: \"kubernetes.io/projected/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-kube-api-access-p75z8\") pod \"ceilometer-0\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") " pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.457281 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.499174 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6387bac-c8e9-49cb-9d8a-63f0e96dd160" path="/var/lib/kubelet/pods/c6387bac-c8e9-49cb-9d8a-63f0e96dd160/volumes"
Oct 06 21:50:43 crc kubenswrapper[5014]: I1006 21:50:43.987633 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 06 21:50:43 crc kubenswrapper[5014]: W1006 21:50:43.992139 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca81e166_fb50_4f4a_8e0a_f2b77cc764a4.slice/crio-78398e0412f09af77cac01239bb50328df5a2925c54a8399ebd7f6e7468ba8cb WatchSource:0}: Error finding container 78398e0412f09af77cac01239bb50328df5a2925c54a8399ebd7f6e7468ba8cb: Status 404 returned error can't find the container with id 78398e0412f09af77cac01239bb50328df5a2925c54a8399ebd7f6e7468ba8cb
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.050001 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4","Type":"ContainerStarted","Data":"78398e0412f09af77cac01239bb50328df5a2925c54a8399ebd7f6e7468ba8cb"}
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.172748 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-z8mlq"]
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.174134 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-z8mlq"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.176229 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.176420 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-5wwlh"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.181733 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.186146 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-z8mlq"]
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.284928 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-config-data\") pod \"nova-cell0-conductor-db-sync-z8mlq\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " pod="openstack/nova-cell0-conductor-db-sync-z8mlq"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.285412 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-scripts\") pod \"nova-cell0-conductor-db-sync-z8mlq\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " pod="openstack/nova-cell0-conductor-db-sync-z8mlq"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.285595 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9p9c\" (UniqueName: \"kubernetes.io/projected/8d74142f-3a0a-4a47-99ce-be7ecac62f76-kube-api-access-z9p9c\") pod \"nova-cell0-conductor-db-sync-z8mlq\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " pod="openstack/nova-cell0-conductor-db-sync-z8mlq"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.285694 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-z8mlq\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " pod="openstack/nova-cell0-conductor-db-sync-z8mlq"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.387589 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-scripts\") pod \"nova-cell0-conductor-db-sync-z8mlq\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " pod="openstack/nova-cell0-conductor-db-sync-z8mlq"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.387743 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9p9c\" (UniqueName: \"kubernetes.io/projected/8d74142f-3a0a-4a47-99ce-be7ecac62f76-kube-api-access-z9p9c\") pod \"nova-cell0-conductor-db-sync-z8mlq\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " pod="openstack/nova-cell0-conductor-db-sync-z8mlq"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.387777 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-z8mlq\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " pod="openstack/nova-cell0-conductor-db-sync-z8mlq"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.387864 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-config-data\") pod \"nova-cell0-conductor-db-sync-z8mlq\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " pod="openstack/nova-cell0-conductor-db-sync-z8mlq"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.393429 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-z8mlq\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " pod="openstack/nova-cell0-conductor-db-sync-z8mlq"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.393589 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-scripts\") pod \"nova-cell0-conductor-db-sync-z8mlq\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " pod="openstack/nova-cell0-conductor-db-sync-z8mlq"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.394217 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-config-data\") pod \"nova-cell0-conductor-db-sync-z8mlq\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " pod="openstack/nova-cell0-conductor-db-sync-z8mlq"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.406769 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9p9c\" (UniqueName: \"kubernetes.io/projected/8d74142f-3a0a-4a47-99ce-be7ecac62f76-kube-api-access-z9p9c\") pod \"nova-cell0-conductor-db-sync-z8mlq\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " pod="openstack/nova-cell0-conductor-db-sync-z8mlq"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.491167 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-z8mlq"
Oct 06 21:50:44 crc kubenswrapper[5014]: I1006 21:50:44.983314 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-z8mlq"]
Oct 06 21:50:44 crc kubenswrapper[5014]: W1006 21:50:44.986452 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d74142f_3a0a_4a47_99ce_be7ecac62f76.slice/crio-d62b88ef37124e078284aef9016bc207cf513274a6f551712c4df83ea5771371 WatchSource:0}: Error finding container d62b88ef37124e078284aef9016bc207cf513274a6f551712c4df83ea5771371: Status 404 returned error can't find the container with id d62b88ef37124e078284aef9016bc207cf513274a6f551712c4df83ea5771371
Oct 06 21:50:45 crc kubenswrapper[5014]: I1006 21:50:45.065809 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4","Type":"ContainerStarted","Data":"dcd04408df2d8af7cd311b38a208f84246d6a166bbb0b823bc1867611f46b663"}
Oct 06 21:50:45 crc kubenswrapper[5014]: I1006 21:50:45.067512 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-z8mlq" event={"ID":"8d74142f-3a0a-4a47-99ce-be7ecac62f76","Type":"ContainerStarted","Data":"d62b88ef37124e078284aef9016bc207cf513274a6f551712c4df83ea5771371"}
Oct 06 21:50:47 crc kubenswrapper[5014]: I1006 21:50:47.112015 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4","Type":"ContainerStarted","Data":"6850b1c9f883a65a0e76de16ffa7c2d7d8638795ae0f986fa5fc1128d6fef157"}
Oct 06 21:50:48 crc kubenswrapper[5014]: I1006 21:50:48.124811 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4","Type":"ContainerStarted","Data":"8d3920eb51d08c4f08b63283a87cf44d31f8ef5b19c5033724a304596d76fc51"}
Oct 06 21:50:49 crc kubenswrapper[5014]: I1006 21:50:49.804903 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Oct 06 21:50:51 crc kubenswrapper[5014]: I1006 21:50:51.735728 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 21:50:51 crc kubenswrapper[5014]: I1006 21:50:51.736058 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 21:50:53 crc kubenswrapper[5014]: I1006 21:50:53.176802 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4","Type":"ContainerStarted","Data":"170fdab0439cff914cb52afcc85d54eb64e4526a84fec42f26196282916f5ba6"}
Oct 06 21:50:53 crc kubenswrapper[5014]: I1006 21:50:53.178611 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="ceilometer-central-agent" containerID="cri-o://dcd04408df2d8af7cd311b38a208f84246d6a166bbb0b823bc1867611f46b663" gracePeriod=30
Oct 06 21:50:53 crc kubenswrapper[5014]: I1006 21:50:53.179244 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Oct 06 21:50:53 crc kubenswrapper[5014]: I1006 21:50:53.179816 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="proxy-httpd" containerID="cri-o://170fdab0439cff914cb52afcc85d54eb64e4526a84fec42f26196282916f5ba6" gracePeriod=30
Oct 06 21:50:53 crc kubenswrapper[5014]: I1006 21:50:53.180060 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="sg-core" containerID="cri-o://8d3920eb51d08c4f08b63283a87cf44d31f8ef5b19c5033724a304596d76fc51" gracePeriod=30
Oct 06 21:50:53 crc kubenswrapper[5014]: I1006 21:50:53.180342 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="ceilometer-notification-agent" containerID="cri-o://6850b1c9f883a65a0e76de16ffa7c2d7d8638795ae0f986fa5fc1128d6fef157" gracePeriod=30
Oct 06 21:50:53 crc kubenswrapper[5014]: I1006 21:50:53.185433 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-z8mlq" event={"ID":"8d74142f-3a0a-4a47-99ce-be7ecac62f76","Type":"ContainerStarted","Data":"099e875c4a6824b8758649c5a1258b7544aa15f8c03b6558b91615a7b48a8016"}
Oct 06 21:50:53 crc kubenswrapper[5014]: I1006 21:50:53.207043 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.877565432 podStartE2EDuration="10.207024254s" podCreationTimestamp="2025-10-06 21:50:43 +0000 UTC" firstStartedPulling="2025-10-06 21:50:43.99493471 +0000 UTC m=+1189.287971444" lastFinishedPulling="2025-10-06 21:50:52.324393522 +0000 UTC m=+1197.617430266" observedRunningTime="2025-10-06 21:50:53.20532472 +0000 UTC m=+1198.498361454" watchObservedRunningTime="2025-10-06 21:50:53.207024254 +0000 UTC m=+1198.500060988"
Oct 06 21:50:53 crc kubenswrapper[5014]: I1006 21:50:53.243103 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-z8mlq" podStartSLOduration=1.894294086 podStartE2EDuration="9.243086688s" podCreationTimestamp="2025-10-06 21:50:44 +0000 UTC" firstStartedPulling="2025-10-06 21:50:44.989263504 +0000 UTC m=+1190.282300238" lastFinishedPulling="2025-10-06 21:50:52.338056106 +0000 UTC m=+1197.631092840" observedRunningTime="2025-10-06 21:50:53.238379549 +0000 UTC m=+1198.531416283" watchObservedRunningTime="2025-10-06 21:50:53.243086688 +0000 UTC m=+1198.536123422"
Oct 06 21:50:54 crc kubenswrapper[5014]: I1006 21:50:54.200485 5014 generic.go:334] "Generic (PLEG): container finished" podID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerID="170fdab0439cff914cb52afcc85d54eb64e4526a84fec42f26196282916f5ba6" exitCode=0
Oct 06 21:50:54 crc kubenswrapper[5014]: I1006 21:50:54.200522 5014 generic.go:334] "Generic (PLEG): container finished" podID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerID="8d3920eb51d08c4f08b63283a87cf44d31f8ef5b19c5033724a304596d76fc51" exitCode=2
Oct 06 21:50:54 crc kubenswrapper[5014]: I1006 21:50:54.200532 5014 generic.go:334] "Generic (PLEG): container finished" podID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerID="6850b1c9f883a65a0e76de16ffa7c2d7d8638795ae0f986fa5fc1128d6fef157" exitCode=0
Oct 06 21:50:54 crc kubenswrapper[5014]: I1006 21:50:54.200588 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4","Type":"ContainerDied","Data":"170fdab0439cff914cb52afcc85d54eb64e4526a84fec42f26196282916f5ba6"}
Oct 06 21:50:54 crc kubenswrapper[5014]: I1006 21:50:54.200664 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4","Type":"ContainerDied","Data":"8d3920eb51d08c4f08b63283a87cf44d31f8ef5b19c5033724a304596d76fc51"}
Oct 06 21:50:54 crc kubenswrapper[5014]: I1006 21:50:54.200684 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4","Type":"ContainerDied","Data":"6850b1c9f883a65a0e76de16ffa7c2d7d8638795ae0f986fa5fc1128d6fef157"}
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.219872 5014 generic.go:334] "Generic (PLEG): container finished" podID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerID="dcd04408df2d8af7cd311b38a208f84246d6a166bbb0b823bc1867611f46b663" exitCode=0
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.220245 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4","Type":"ContainerDied","Data":"dcd04408df2d8af7cd311b38a208f84246d6a166bbb0b823bc1867611f46b663"}
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.388173 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.494749 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-log-httpd\") pod \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") "
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.495393 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p75z8\" (UniqueName: \"kubernetes.io/projected/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-kube-api-access-p75z8\") pod \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") "
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.495449 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-run-httpd\") pod \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") "
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.495463 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" (UID: "ca81e166-fb50-4f4a-8e0a-f2b77cc764a4"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.495473 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-combined-ca-bundle\") pod \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") "
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.495522 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-config-data\") pod \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") "
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.495576 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-sg-core-conf-yaml\") pod \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") "
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.495667 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-scripts\") pod \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\" (UID: \"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4\") "
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.496332 5014 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-log-httpd\") on node \"crc\" DevicePath \"\""
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.496758 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" (UID: "ca81e166-fb50-4f4a-8e0a-f2b77cc764a4"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.502700 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-scripts" (OuterVolumeSpecName: "scripts") pod "ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" (UID: "ca81e166-fb50-4f4a-8e0a-f2b77cc764a4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.515307 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-kube-api-access-p75z8" (OuterVolumeSpecName: "kube-api-access-p75z8") pod "ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" (UID: "ca81e166-fb50-4f4a-8e0a-f2b77cc764a4"). InnerVolumeSpecName "kube-api-access-p75z8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.532792 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" (UID: "ca81e166-fb50-4f4a-8e0a-f2b77cc764a4"). InnerVolumeSpecName "sg-core-conf-yaml".
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.581584 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" (UID: "ca81e166-fb50-4f4a-8e0a-f2b77cc764a4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.598998 5014 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.599029 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.599040 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p75z8\" (UniqueName: \"kubernetes.io/projected/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-kube-api-access-p75z8\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.599052 5014 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.599061 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.624510 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-config-data" (OuterVolumeSpecName: "config-data") pod "ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" (UID: "ca81e166-fb50-4f4a-8e0a-f2b77cc764a4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:50:55 crc kubenswrapper[5014]: I1006 21:50:55.700297 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.231441 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca81e166-fb50-4f4a-8e0a-f2b77cc764a4","Type":"ContainerDied","Data":"78398e0412f09af77cac01239bb50328df5a2925c54a8399ebd7f6e7468ba8cb"} Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.231537 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.232963 5014 scope.go:117] "RemoveContainer" containerID="170fdab0439cff914cb52afcc85d54eb64e4526a84fec42f26196282916f5ba6" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.264804 5014 scope.go:117] "RemoveContainer" containerID="8d3920eb51d08c4f08b63283a87cf44d31f8ef5b19c5033724a304596d76fc51" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.281109 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.288513 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.302178 5014 scope.go:117] "RemoveContainer" containerID="6850b1c9f883a65a0e76de16ffa7c2d7d8638795ae0f986fa5fc1128d6fef157" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.323715 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:56 crc kubenswrapper[5014]: E1006 21:50:56.324282 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="proxy-httpd" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.324311 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="proxy-httpd" Oct 06 21:50:56 crc kubenswrapper[5014]: E1006 21:50:56.324336 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="sg-core" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.324351 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="sg-core" Oct 06 21:50:56 crc kubenswrapper[5014]: E1006 21:50:56.324404 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="ceilometer-central-agent" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.324418 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="ceilometer-central-agent" Oct 06 21:50:56 crc kubenswrapper[5014]: E1006 21:50:56.324438 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="ceilometer-notification-agent" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.324449 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="ceilometer-notification-agent" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.324761 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="ceilometer-notification-agent" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.324793 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="ceilometer-central-agent" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.324829 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="proxy-httpd" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.324845 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" containerName="sg-core" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.327824 5014 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.334319 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.334448 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.338990 5014 scope.go:117] "RemoveContainer" containerID="dcd04408df2d8af7cd311b38a208f84246d6a166bbb0b823bc1867611f46b663" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.349236 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.511892 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.512034 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-config-data\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.512057 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.512229 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee8201de-0efa-4162-9ccf-0ab77a414bab-log-httpd\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.512306 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sfpq\" (UniqueName: \"kubernetes.io/projected/ee8201de-0efa-4162-9ccf-0ab77a414bab-kube-api-access-5sfpq\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.512344 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-scripts\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.512411 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee8201de-0efa-4162-9ccf-0ab77a414bab-run-httpd\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.613497 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-config-data\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.613539 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.613588 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee8201de-0efa-4162-9ccf-0ab77a414bab-log-httpd\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.613640 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sfpq\" (UniqueName: \"kubernetes.io/projected/ee8201de-0efa-4162-9ccf-0ab77a414bab-kube-api-access-5sfpq\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.613672 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-scripts\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.613720 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee8201de-0efa-4162-9ccf-0ab77a414bab-run-httpd\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.613768 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.614298 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee8201de-0efa-4162-9ccf-0ab77a414bab-log-httpd\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.614518 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee8201de-0efa-4162-9ccf-0ab77a414bab-run-httpd\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.621052 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-scripts\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.622183 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.623451 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.624463 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-config-data\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.640765 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sfpq\" (UniqueName: \"kubernetes.io/projected/ee8201de-0efa-4162-9ccf-0ab77a414bab-kube-api-access-5sfpq\") pod \"ceilometer-0\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " pod="openstack/ceilometer-0" Oct 06 21:50:56 crc kubenswrapper[5014]: I1006 21:50:56.654183 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:50:57 crc kubenswrapper[5014]: I1006 21:50:57.183854 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:50:57 crc kubenswrapper[5014]: I1006 21:50:57.249558 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ee8201de-0efa-4162-9ccf-0ab77a414bab","Type":"ContainerStarted","Data":"3db34027d3353c0790a5c2e7a1e28d8e46abe94a7ecfb705d807e343e95d0f2d"} Oct 06 21:50:57 crc kubenswrapper[5014]: I1006 21:50:57.508088 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca81e166-fb50-4f4a-8e0a-f2b77cc764a4" path="/var/lib/kubelet/pods/ca81e166-fb50-4f4a-8e0a-f2b77cc764a4/volumes" Oct 06 21:50:58 crc kubenswrapper[5014]: I1006 21:50:58.263092 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ee8201de-0efa-4162-9ccf-0ab77a414bab","Type":"ContainerStarted","Data":"76240dd94d6f9661e716d26feb123b9bb4a498809ee39ed2e4c4eed642fa31e1"} Oct 06 21:50:59 crc kubenswrapper[5014]: I1006 21:50:59.280273 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ee8201de-0efa-4162-9ccf-0ab77a414bab","Type":"ContainerStarted","Data":"c276f25de93b08ef19f70714adc9dc42e6b05adfaa0940fd1d8c4f8855fd0945"} Oct 06 21:51:00 crc kubenswrapper[5014]: I1006 21:51:00.303711 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ee8201de-0efa-4162-9ccf-0ab77a414bab","Type":"ContainerStarted","Data":"6e7e916ca9e9909b3ea0020a6575030777e32134b63488d9097d2871994c67d4"} Oct 06 21:51:01 crc kubenswrapper[5014]: I1006 21:51:01.320249 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ee8201de-0efa-4162-9ccf-0ab77a414bab","Type":"ContainerStarted","Data":"e5ad15eb0aae365619d30dfc71dabab11ca5a0cc6a4f711c86f56a3713a08fb3"} Oct 06 21:51:01 crc kubenswrapper[5014]: I1006 21:51:01.322735 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 06 21:51:01 crc kubenswrapper[5014]: I1006 21:51:01.363480 5014 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.665446809 podStartE2EDuration="5.363457619s" podCreationTimestamp="2025-10-06 21:50:56 +0000 UTC" firstStartedPulling="2025-10-06 21:50:57.197590375 +0000 UTC m=+1202.490627109" lastFinishedPulling="2025-10-06 21:51:00.895601185 +0000 UTC m=+1206.188637919" observedRunningTime="2025-10-06 21:51:01.35248488 +0000 UTC m=+1206.645521614" watchObservedRunningTime="2025-10-06 21:51:01.363457619 +0000 UTC m=+1206.656494383" Oct 06 21:51:03 crc kubenswrapper[5014]: I1006 21:51:03.344272 5014 generic.go:334] "Generic (PLEG): container finished" podID="8d74142f-3a0a-4a47-99ce-be7ecac62f76" containerID="099e875c4a6824b8758649c5a1258b7544aa15f8c03b6558b91615a7b48a8016" exitCode=0 Oct 06 21:51:03 crc kubenswrapper[5014]: I1006 21:51:03.344369 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-z8mlq" event={"ID":"8d74142f-3a0a-4a47-99ce-be7ecac62f76","Type":"ContainerDied","Data":"099e875c4a6824b8758649c5a1258b7544aa15f8c03b6558b91615a7b48a8016"} Oct 06 21:51:04 crc kubenswrapper[5014]: I1006 21:51:04.770188 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-z8mlq" Oct 06 21:51:04 crc kubenswrapper[5014]: I1006 21:51:04.883530 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-config-data\") pod \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " Oct 06 21:51:04 crc kubenswrapper[5014]: I1006 21:51:04.883755 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-scripts\") pod \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " Oct 06 21:51:04 crc kubenswrapper[5014]: I1006 21:51:04.883859 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9p9c\" (UniqueName: \"kubernetes.io/projected/8d74142f-3a0a-4a47-99ce-be7ecac62f76-kube-api-access-z9p9c\") pod \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " Oct 06 21:51:04 crc kubenswrapper[5014]: I1006 21:51:04.884097 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-combined-ca-bundle\") pod \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\" (UID: \"8d74142f-3a0a-4a47-99ce-be7ecac62f76\") " Oct 06 21:51:04 crc kubenswrapper[5014]: I1006 21:51:04.890837 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d74142f-3a0a-4a47-99ce-be7ecac62f76-kube-api-access-z9p9c" (OuterVolumeSpecName: "kube-api-access-z9p9c") pod "8d74142f-3a0a-4a47-99ce-be7ecac62f76" (UID: "8d74142f-3a0a-4a47-99ce-be7ecac62f76"). InnerVolumeSpecName "kube-api-access-z9p9c". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:51:04 crc kubenswrapper[5014]: I1006 21:51:04.908840 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-scripts" (OuterVolumeSpecName: "scripts") pod "8d74142f-3a0a-4a47-99ce-be7ecac62f76" (UID: "8d74142f-3a0a-4a47-99ce-be7ecac62f76"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:04 crc kubenswrapper[5014]: I1006 21:51:04.927567 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8d74142f-3a0a-4a47-99ce-be7ecac62f76" (UID: "8d74142f-3a0a-4a47-99ce-be7ecac62f76"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:04 crc kubenswrapper[5014]: I1006 21:51:04.936690 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-config-data" (OuterVolumeSpecName: "config-data") pod "8d74142f-3a0a-4a47-99ce-be7ecac62f76" (UID: "8d74142f-3a0a-4a47-99ce-be7ecac62f76"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:04 crc kubenswrapper[5014]: I1006 21:51:04.986599 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:04 crc kubenswrapper[5014]: I1006 21:51:04.986668 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:04 crc kubenswrapper[5014]: I1006 21:51:04.986682 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d74142f-3a0a-4a47-99ce-be7ecac62f76-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:04 crc kubenswrapper[5014]: I1006 21:51:04.986695 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9p9c\" (UniqueName: \"kubernetes.io/projected/8d74142f-3a0a-4a47-99ce-be7ecac62f76-kube-api-access-z9p9c\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.372200 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-z8mlq" event={"ID":"8d74142f-3a0a-4a47-99ce-be7ecac62f76","Type":"ContainerDied","Data":"d62b88ef37124e078284aef9016bc207cf513274a6f551712c4df83ea5771371"} Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.372261 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d62b88ef37124e078284aef9016bc207cf513274a6f551712c4df83ea5771371" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.372273 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-z8mlq" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.511162 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Oct 06 21:51:05 crc kubenswrapper[5014]: E1006 21:51:05.511711 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d74142f-3a0a-4a47-99ce-be7ecac62f76" containerName="nova-cell0-conductor-db-sync" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.511741 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d74142f-3a0a-4a47-99ce-be7ecac62f76" containerName="nova-cell0-conductor-db-sync" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.512100 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d74142f-3a0a-4a47-99ce-be7ecac62f76" containerName="nova-cell0-conductor-db-sync" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.514929 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.521464 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.522601 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-5wwlh" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.532029 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.599846 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvbzs\" (UniqueName: \"kubernetes.io/projected/6ebf215b-a88f-4b08-8f2e-58284b7d4548-kube-api-access-cvbzs\") pod \"nova-cell0-conductor-0\" (UID: \"6ebf215b-a88f-4b08-8f2e-58284b7d4548\") " pod="openstack/nova-cell0-conductor-0" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.600135 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ebf215b-a88f-4b08-8f2e-58284b7d4548-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"6ebf215b-a88f-4b08-8f2e-58284b7d4548\") " pod="openstack/nova-cell0-conductor-0" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.600445 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ebf215b-a88f-4b08-8f2e-58284b7d4548-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"6ebf215b-a88f-4b08-8f2e-58284b7d4548\") " pod="openstack/nova-cell0-conductor-0" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.702811 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ebf215b-a88f-4b08-8f2e-58284b7d4548-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"6ebf215b-a88f-4b08-8f2e-58284b7d4548\") " pod="openstack/nova-cell0-conductor-0" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.703739 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvbzs\" (UniqueName: \"kubernetes.io/projected/6ebf215b-a88f-4b08-8f2e-58284b7d4548-kube-api-access-cvbzs\") pod \"nova-cell0-conductor-0\" (UID: \"6ebf215b-a88f-4b08-8f2e-58284b7d4548\") " pod="openstack/nova-cell0-conductor-0" Oct 06 21:51:05 crc 
kubenswrapper[5014]: I1006 21:51:05.703782 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ebf215b-a88f-4b08-8f2e-58284b7d4548-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"6ebf215b-a88f-4b08-8f2e-58284b7d4548\") " pod="openstack/nova-cell0-conductor-0" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.708188 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ebf215b-a88f-4b08-8f2e-58284b7d4548-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"6ebf215b-a88f-4b08-8f2e-58284b7d4548\") " pod="openstack/nova-cell0-conductor-0" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.708258 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ebf215b-a88f-4b08-8f2e-58284b7d4548-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"6ebf215b-a88f-4b08-8f2e-58284b7d4548\") " pod="openstack/nova-cell0-conductor-0" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.739111 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvbzs\" (UniqueName: \"kubernetes.io/projected/6ebf215b-a88f-4b08-8f2e-58284b7d4548-kube-api-access-cvbzs\") pod \"nova-cell0-conductor-0\" (UID: \"6ebf215b-a88f-4b08-8f2e-58284b7d4548\") " pod="openstack/nova-cell0-conductor-0" Oct 06 21:51:05 crc kubenswrapper[5014]: I1006 21:51:05.848539 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Oct 06 21:51:06 crc kubenswrapper[5014]: I1006 21:51:06.385511 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Oct 06 21:51:07 crc kubenswrapper[5014]: I1006 21:51:07.394264 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"6ebf215b-a88f-4b08-8f2e-58284b7d4548","Type":"ContainerStarted","Data":"e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee"} Oct 06 21:51:07 crc kubenswrapper[5014]: I1006 21:51:07.394759 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Oct 06 21:51:07 crc kubenswrapper[5014]: I1006 21:51:07.394770 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"6ebf215b-a88f-4b08-8f2e-58284b7d4548","Type":"ContainerStarted","Data":"9dcf19a5822115a7b4fcf3b6501c38303e0234f8c503694ae4a102f7764efe47"} Oct 06 21:51:07 crc kubenswrapper[5014]: I1006 21:51:07.415449 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.415430779 podStartE2EDuration="2.415430779s" podCreationTimestamp="2025-10-06 21:51:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:51:07.413054553 +0000 UTC m=+1212.706091287" watchObservedRunningTime="2025-10-06 21:51:07.415430779 +0000 UTC m=+1212.708467513" Oct 06 21:51:15 crc kubenswrapper[5014]: I1006 21:51:15.905294 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.476597 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-bhl92"] Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.479415 
5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.482433 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.482518 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.531950 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-bhl92"] Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.562748 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqznq\" (UniqueName: \"kubernetes.io/projected/b8f0d160-17d7-4876-925a-93b29c26847a-kube-api-access-bqznq\") pod \"nova-cell0-cell-mapping-bhl92\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.563246 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-config-data\") pod \"nova-cell0-cell-mapping-bhl92\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.563382 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-scripts\") pod \"nova-cell0-cell-mapping-bhl92\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.563550 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-bhl92\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.664056 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.665025 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-bhl92\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.665152 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqznq\" (UniqueName: \"kubernetes.io/projected/b8f0d160-17d7-4876-925a-93b29c26847a-kube-api-access-bqznq\") pod \"nova-cell0-cell-mapping-bhl92\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.665198 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-config-data\") pod \"nova-cell0-cell-mapping-bhl92\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " 
pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.665222 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-scripts\") pod \"nova-cell0-cell-mapping-bhl92\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.665586 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.672125 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-scripts\") pod \"nova-cell0-cell-mapping-bhl92\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.674060 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.678183 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-config-data\") pod \"nova-cell0-cell-mapping-bhl92\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.686143 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-bhl92\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.688918 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqznq\" (UniqueName: \"kubernetes.io/projected/b8f0d160-17d7-4876-925a-93b29c26847a-kube-api-access-bqznq\") pod \"nova-cell0-cell-mapping-bhl92\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.724797 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.758573 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.760709 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.762479 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.770506 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/469ba97a-f5a9-4088-905b-6bb38bd12f25-config-data\") pod \"nova-scheduler-0\" (UID: \"469ba97a-f5a9-4088-905b-6bb38bd12f25\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.770635 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfv25\" (UniqueName: \"kubernetes.io/projected/469ba97a-f5a9-4088-905b-6bb38bd12f25-kube-api-access-gfv25\") pod \"nova-scheduler-0\" (UID: \"469ba97a-f5a9-4088-905b-6bb38bd12f25\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.770744 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/469ba97a-f5a9-4088-905b-6bb38bd12f25-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"469ba97a-f5a9-4088-905b-6bb38bd12f25\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.779633 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.817575 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.853297 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.857354 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.863812 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.875756 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-logs\") pod \"nova-api-0\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " pod="openstack/nova-api-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.875817 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2wdf\" (UniqueName: \"kubernetes.io/projected/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-kube-api-access-f2wdf\") pod \"nova-api-0\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " pod="openstack/nova-api-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.875894 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/469ba97a-f5a9-4088-905b-6bb38bd12f25-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"469ba97a-f5a9-4088-905b-6bb38bd12f25\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.875945 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-config-data\") pod \"nova-api-0\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " pod="openstack/nova-api-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.876005 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " pod="openstack/nova-api-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.876031 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/469ba97a-f5a9-4088-905b-6bb38bd12f25-config-data\") pod \"nova-scheduler-0\" (UID: \"469ba97a-f5a9-4088-905b-6bb38bd12f25\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.876097 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfv25\" (UniqueName: \"kubernetes.io/projected/469ba97a-f5a9-4088-905b-6bb38bd12f25-kube-api-access-gfv25\") pod \"nova-scheduler-0\" (UID: \"469ba97a-f5a9-4088-905b-6bb38bd12f25\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.880397 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.890426 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/469ba97a-f5a9-4088-905b-6bb38bd12f25-config-data\") pod \"nova-scheduler-0\" (UID: \"469ba97a-f5a9-4088-905b-6bb38bd12f25\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.891166 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/469ba97a-f5a9-4088-905b-6bb38bd12f25-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"469ba97a-f5a9-4088-905b-6bb38bd12f25\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.926659 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.938015 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfv25\" (UniqueName: \"kubernetes.io/projected/469ba97a-f5a9-4088-905b-6bb38bd12f25-kube-api-access-gfv25\") pod \"nova-scheduler-0\" (UID: \"469ba97a-f5a9-4088-905b-6bb38bd12f25\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.954209 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.974644 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.979002 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-config-data\") pod \"nova-api-0\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " pod="openstack/nova-api-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.979053 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87709e2f-0587-47af-8c38-99a845d9de28-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") " pod="openstack/nova-metadata-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.979082 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87709e2f-0587-47af-8c38-99a845d9de28-logs\") pod \"nova-metadata-0\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") " pod="openstack/nova-metadata-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.979104 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " pod="openstack/nova-api-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.979118 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87709e2f-0587-47af-8c38-99a845d9de28-config-data\") pod \"nova-metadata-0\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") " pod="openstack/nova-metadata-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.979168 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjsnj\" (UniqueName: \"kubernetes.io/projected/87709e2f-0587-47af-8c38-99a845d9de28-kube-api-access-sjsnj\") pod \"nova-metadata-0\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") " pod="openstack/nova-metadata-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.979208 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2wdf\" (UniqueName: 
\"kubernetes.io/projected/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-kube-api-access-f2wdf\") pod \"nova-api-0\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " pod="openstack/nova-api-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.979225 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-logs\") pod \"nova-api-0\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " pod="openstack/nova-api-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.979695 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-logs\") pod \"nova-api-0\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " pod="openstack/nova-api-0" Oct 06 21:51:16 crc kubenswrapper[5014]: I1006 21:51:16.987950 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-config-data\") pod \"nova-api-0\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " pod="openstack/nova-api-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.012418 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " pod="openstack/nova-api-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.048105 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2wdf\" (UniqueName: \"kubernetes.io/projected/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-kube-api-access-f2wdf\") pod \"nova-api-0\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " pod="openstack/nova-api-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.085199 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-64d8d96789-x75b8"] Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.091255 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.091709 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8735a7a-5287-4c79-ac48-8d03dc88b146-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a8735a7a-5287-4c79-ac48-8d03dc88b146\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.092554 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8735a7a-5287-4c79-ac48-8d03dc88b146-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a8735a7a-5287-4c79-ac48-8d03dc88b146\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.093340 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87709e2f-0587-47af-8c38-99a845d9de28-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") " pod="openstack/nova-metadata-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.093414 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87709e2f-0587-47af-8c38-99a845d9de28-logs\") pod \"nova-metadata-0\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") " pod="openstack/nova-metadata-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.093456 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87709e2f-0587-47af-8c38-99a845d9de28-config-data\") pod \"nova-metadata-0\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") " pod="openstack/nova-metadata-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.093897 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87709e2f-0587-47af-8c38-99a845d9de28-logs\") pod \"nova-metadata-0\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") " pod="openstack/nova-metadata-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.099530 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjsnj\" (UniqueName: \"kubernetes.io/projected/87709e2f-0587-47af-8c38-99a845d9de28-kube-api-access-sjsnj\") pod \"nova-metadata-0\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") " pod="openstack/nova-metadata-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.099804 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wd2f\" (UniqueName: \"kubernetes.io/projected/a8735a7a-5287-4c79-ac48-8d03dc88b146-kube-api-access-9wd2f\") pod \"nova-cell1-novncproxy-0\" (UID: \"a8735a7a-5287-4c79-ac48-8d03dc88b146\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.100375 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87709e2f-0587-47af-8c38-99a845d9de28-config-data\") pod \"nova-metadata-0\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") " pod="openstack/nova-metadata-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.104205 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87709e2f-0587-47af-8c38-99a845d9de28-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") " pod="openstack/nova-metadata-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.106320 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.110956 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.126403 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.131417 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjsnj\" (UniqueName: \"kubernetes.io/projected/87709e2f-0587-47af-8c38-99a845d9de28-kube-api-access-sjsnj\") pod \"nova-metadata-0\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") " pod="openstack/nova-metadata-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.159748 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64d8d96789-x75b8"] Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.203108 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8735a7a-5287-4c79-ac48-8d03dc88b146-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a8735a7a-5287-4c79-ac48-8d03dc88b146\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.203161 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8735a7a-5287-4c79-ac48-8d03dc88b146-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a8735a7a-5287-4c79-ac48-8d03dc88b146\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.203192 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-ovsdbserver-nb\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.203211 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-config\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.203249 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-ovsdbserver-sb\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.203375 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fsbn\" (UniqueName: \"kubernetes.io/projected/486ed6a2-9dc8-49de-8601-09042e42e5b1-kube-api-access-4fsbn\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: 
\"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.203439 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wd2f\" (UniqueName: \"kubernetes.io/projected/a8735a7a-5287-4c79-ac48-8d03dc88b146-kube-api-access-9wd2f\") pod \"nova-cell1-novncproxy-0\" (UID: \"a8735a7a-5287-4c79-ac48-8d03dc88b146\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.203496 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-dns-svc\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.203527 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-dns-swift-storage-0\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.213904 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8735a7a-5287-4c79-ac48-8d03dc88b146-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a8735a7a-5287-4c79-ac48-8d03dc88b146\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.214468 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8735a7a-5287-4c79-ac48-8d03dc88b146-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a8735a7a-5287-4c79-ac48-8d03dc88b146\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.226161 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wd2f\" (UniqueName: \"kubernetes.io/projected/a8735a7a-5287-4c79-ac48-8d03dc88b146-kube-api-access-9wd2f\") pod \"nova-cell1-novncproxy-0\" (UID: \"a8735a7a-5287-4c79-ac48-8d03dc88b146\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.305478 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-ovsdbserver-nb\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.305524 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-config\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.305572 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-ovsdbserver-sb\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " 
pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.305816 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fsbn\" (UniqueName: \"kubernetes.io/projected/486ed6a2-9dc8-49de-8601-09042e42e5b1-kube-api-access-4fsbn\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.305903 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-dns-svc\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.305939 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-dns-swift-storage-0\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.306999 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-dns-swift-storage-0\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.306996 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-ovsdbserver-sb\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.307495 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-ovsdbserver-nb\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.307893 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-dns-svc\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.308059 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-config\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.327941 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fsbn\" (UniqueName: \"kubernetes.io/projected/486ed6a2-9dc8-49de-8601-09042e42e5b1-kube-api-access-4fsbn\") pod \"dnsmasq-dns-64d8d96789-x75b8\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 
21:51:17.347429 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.393406 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.414144 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.519351 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-bhl92"] Oct 06 21:51:17 crc kubenswrapper[5014]: W1006 21:51:17.536972 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb8f0d160_17d7_4876_925a_93b29c26847a.slice/crio-705df94622979094e1764546de1ee7d1ce2082012f14ff80223747bb7dc61fd1 WatchSource:0}: Error finding container 705df94622979094e1764546de1ee7d1ce2082012f14ff80223747bb7dc61fd1: Status 404 returned error can't find the container with id 705df94622979094e1764546de1ee7d1ce2082012f14ff80223747bb7dc61fd1 Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.643713 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.751008 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.828141 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-wnhrf"] Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.831213 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.836306 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-wnhrf"] Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.837009 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.837983 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.896907 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.917637 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mj652\" (UniqueName: \"kubernetes.io/projected/c5a46c2b-6455-4103-b35c-db8e3301d1e9-kube-api-access-mj652\") pod \"nova-cell1-conductor-db-sync-wnhrf\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.917671 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-scripts\") pod \"nova-cell1-conductor-db-sync-wnhrf\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.917817 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-config-data\") pod \"nova-cell1-conductor-db-sync-wnhrf\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:17 crc kubenswrapper[5014]: I1006 21:51:17.917851 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-wnhrf\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.000585 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.020003 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-config-data\") pod \"nova-cell1-conductor-db-sync-wnhrf\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.020071 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-wnhrf\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.020164 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mj652\" (UniqueName: \"kubernetes.io/projected/c5a46c2b-6455-4103-b35c-db8e3301d1e9-kube-api-access-mj652\") pod \"nova-cell1-conductor-db-sync-wnhrf\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.020196 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-scripts\") pod \"nova-cell1-conductor-db-sync-wnhrf\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.028498 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-config-data\") pod \"nova-cell1-conductor-db-sync-wnhrf\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.029102 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-scripts\") pod \"nova-cell1-conductor-db-sync-wnhrf\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.031166 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-wnhrf\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " 
pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.038113 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mj652\" (UniqueName: \"kubernetes.io/projected/c5a46c2b-6455-4103-b35c-db8e3301d1e9-kube-api-access-mj652\") pod \"nova-cell1-conductor-db-sync-wnhrf\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.114393 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64d8d96789-x75b8"] Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.163964 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.543269 5014 generic.go:334] "Generic (PLEG): container finished" podID="486ed6a2-9dc8-49de-8601-09042e42e5b1" containerID="ca2e042630ca0fb08befe1a8638b57e312fd37f6bd86aa9c08da4b6cb0a9ea5a" exitCode=0 Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.544479 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64d8d96789-x75b8" event={"ID":"486ed6a2-9dc8-49de-8601-09042e42e5b1","Type":"ContainerDied","Data":"ca2e042630ca0fb08befe1a8638b57e312fd37f6bd86aa9c08da4b6cb0a9ea5a"} Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.544512 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64d8d96789-x75b8" event={"ID":"486ed6a2-9dc8-49de-8601-09042e42e5b1","Type":"ContainerStarted","Data":"11bd2ee53cde7311f83040a920d1edf02d0c41157fdf02d4c2d80b246f2d3d32"} Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.549286 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2f141e4f-d87c-41df-bb55-5a0cd723ae0b","Type":"ContainerStarted","Data":"14e2b8bc6094151c4869cd1e561105891b7196be424371d83bc788d2a56ef190"} Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.552919 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"469ba97a-f5a9-4088-905b-6bb38bd12f25","Type":"ContainerStarted","Data":"5d825a5f1dbd9363eca528934e6532b9ad9091cc89030889d0a2074bcca96132"} Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.555444 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"87709e2f-0587-47af-8c38-99a845d9de28","Type":"ContainerStarted","Data":"25cb8b8be0be9587cb2d469a7ef330e3f5b3aad503039f54ecc32a3284e480b7"} Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.559400 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bhl92" event={"ID":"b8f0d160-17d7-4876-925a-93b29c26847a","Type":"ContainerStarted","Data":"e0108697276545e2fd0df481de9931a00ca8d8ecc01d4199b56e5ce3ba02591e"} Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.559445 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bhl92" event={"ID":"b8f0d160-17d7-4876-925a-93b29c26847a","Type":"ContainerStarted","Data":"705df94622979094e1764546de1ee7d1ce2082012f14ff80223747bb7dc61fd1"} Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.577641 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a8735a7a-5287-4c79-ac48-8d03dc88b146","Type":"ContainerStarted","Data":"496e79c38dbbc237833cb6febcccdec6c48847ea5dc133725bcfda55f09dd54c"} Oct 06 21:51:18 
crc kubenswrapper[5014]: I1006 21:51:18.588327 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-bhl92" podStartSLOduration=2.588303969 podStartE2EDuration="2.588303969s" podCreationTimestamp="2025-10-06 21:51:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:51:18.587520684 +0000 UTC m=+1223.880557438" watchObservedRunningTime="2025-10-06 21:51:18.588303969 +0000 UTC m=+1223.881340703" Oct 06 21:51:18 crc kubenswrapper[5014]: I1006 21:51:18.685265 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-wnhrf"] Oct 06 21:51:18 crc kubenswrapper[5014]: W1006 21:51:18.696720 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5a46c2b_6455_4103_b35c_db8e3301d1e9.slice/crio-5a70ee710d527bec7931f172c4ac3d7c8fb4da89141ed227c9b1e4d54c193257 WatchSource:0}: Error finding container 5a70ee710d527bec7931f172c4ac3d7c8fb4da89141ed227c9b1e4d54c193257: Status 404 returned error can't find the container with id 5a70ee710d527bec7931f172c4ac3d7c8fb4da89141ed227c9b1e4d54c193257 Oct 06 21:51:19 crc kubenswrapper[5014]: I1006 21:51:19.608321 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-wnhrf" event={"ID":"c5a46c2b-6455-4103-b35c-db8e3301d1e9","Type":"ContainerStarted","Data":"7943ac158fc9c82b6786736f3eafcf3a5d3ac1281259d04c7563c98199d84d1d"} Oct 06 21:51:19 crc kubenswrapper[5014]: I1006 21:51:19.608687 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-wnhrf" event={"ID":"c5a46c2b-6455-4103-b35c-db8e3301d1e9","Type":"ContainerStarted","Data":"5a70ee710d527bec7931f172c4ac3d7c8fb4da89141ed227c9b1e4d54c193257"} Oct 06 21:51:19 crc kubenswrapper[5014]: I1006 21:51:19.621562 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64d8d96789-x75b8" event={"ID":"486ed6a2-9dc8-49de-8601-09042e42e5b1","Type":"ContainerStarted","Data":"2c01c113ed0c4688ed51a7ea51e16ece0de9c6f03bc70c0f7a9b90f07fcbcac6"} Oct 06 21:51:19 crc kubenswrapper[5014]: I1006 21:51:19.633210 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-wnhrf" podStartSLOduration=2.633186518 podStartE2EDuration="2.633186518s" podCreationTimestamp="2025-10-06 21:51:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:51:19.633013812 +0000 UTC m=+1224.926050546" watchObservedRunningTime="2025-10-06 21:51:19.633186518 +0000 UTC m=+1224.926223252" Oct 06 21:51:19 crc kubenswrapper[5014]: I1006 21:51:19.665643 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-64d8d96789-x75b8" podStartSLOduration=3.665612467 podStartE2EDuration="3.665612467s" podCreationTimestamp="2025-10-06 21:51:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:51:19.655227397 +0000 UTC m=+1224.948264131" watchObservedRunningTime="2025-10-06 21:51:19.665612467 +0000 UTC m=+1224.958649201" Oct 06 21:51:20 crc kubenswrapper[5014]: I1006 21:51:20.632224 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 
21:51:20 crc kubenswrapper[5014]: I1006 21:51:20.719603 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:51:20 crc kubenswrapper[5014]: I1006 21:51:20.748708 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 06 21:51:21 crc kubenswrapper[5014]: I1006 21:51:21.648993 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2f141e4f-d87c-41df-bb55-5a0cd723ae0b","Type":"ContainerStarted","Data":"5387587dd2e9ecece03a233025f6a291123cee61b607fbfaa8b934939fbda252"} Oct 06 21:51:21 crc kubenswrapper[5014]: I1006 21:51:21.650926 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"469ba97a-f5a9-4088-905b-6bb38bd12f25","Type":"ContainerStarted","Data":"add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2"} Oct 06 21:51:21 crc kubenswrapper[5014]: I1006 21:51:21.658125 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"87709e2f-0587-47af-8c38-99a845d9de28","Type":"ContainerStarted","Data":"38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c"} Oct 06 21:51:21 crc kubenswrapper[5014]: I1006 21:51:21.668381 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="a8735a7a-5287-4c79-ac48-8d03dc88b146" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://1656c7d6e0b91ae4a8c5e642556f1c2602908fcbd49f5364d3850728954bf952" gracePeriod=30 Oct 06 21:51:21 crc kubenswrapper[5014]: I1006 21:51:21.668774 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a8735a7a-5287-4c79-ac48-8d03dc88b146","Type":"ContainerStarted","Data":"1656c7d6e0b91ae4a8c5e642556f1c2602908fcbd49f5364d3850728954bf952"} Oct 06 21:51:21 crc kubenswrapper[5014]: I1006 21:51:21.669690 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.217399752 podStartE2EDuration="5.669656535s" podCreationTimestamp="2025-10-06 21:51:16 +0000 UTC" firstStartedPulling="2025-10-06 21:51:17.66666287 +0000 UTC m=+1222.959699604" lastFinishedPulling="2025-10-06 21:51:21.118919613 +0000 UTC m=+1226.411956387" observedRunningTime="2025-10-06 21:51:21.668958903 +0000 UTC m=+1226.961995637" watchObservedRunningTime="2025-10-06 21:51:21.669656535 +0000 UTC m=+1226.962693289" Oct 06 21:51:21 crc kubenswrapper[5014]: I1006 21:51:21.695743 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.575415041 podStartE2EDuration="5.695718762s" podCreationTimestamp="2025-10-06 21:51:16 +0000 UTC" firstStartedPulling="2025-10-06 21:51:18.00582899 +0000 UTC m=+1223.298865744" lastFinishedPulling="2025-10-06 21:51:21.126132721 +0000 UTC m=+1226.419169465" observedRunningTime="2025-10-06 21:51:21.685342062 +0000 UTC m=+1226.978378786" watchObservedRunningTime="2025-10-06 21:51:21.695718762 +0000 UTC m=+1226.988755496" Oct 06 21:51:21 crc kubenswrapper[5014]: I1006 21:51:21.734823 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 21:51:21 crc kubenswrapper[5014]: I1006 21:51:21.734867 5014 
prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 21:51:21 crc kubenswrapper[5014]: I1006 21:51:21.734904 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:51:21 crc kubenswrapper[5014]: I1006 21:51:21.735560 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"26989bcf2919a89c6621b6af8723e17c2272f8a4a0f8b3ba02a253747e518022"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 06 21:51:21 crc kubenswrapper[5014]: I1006 21:51:21.735609 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://26989bcf2919a89c6621b6af8723e17c2272f8a4a0f8b3ba02a253747e518022" gracePeriod=600 Oct 06 21:51:22 crc kubenswrapper[5014]: I1006 21:51:22.107782 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Oct 06 21:51:22 crc kubenswrapper[5014]: I1006 21:51:22.394805 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:22 crc kubenswrapper[5014]: I1006 21:51:22.677664 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"87709e2f-0587-47af-8c38-99a845d9de28","Type":"ContainerStarted","Data":"d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c"} Oct 06 21:51:22 crc kubenswrapper[5014]: I1006 21:51:22.677784 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="87709e2f-0587-47af-8c38-99a845d9de28" containerName="nova-metadata-log" containerID="cri-o://38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c" gracePeriod=30 Oct 06 21:51:22 crc kubenswrapper[5014]: I1006 21:51:22.677879 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="87709e2f-0587-47af-8c38-99a845d9de28" containerName="nova-metadata-metadata" containerID="cri-o://d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c" gracePeriod=30 Oct 06 21:51:22 crc kubenswrapper[5014]: I1006 21:51:22.689461 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="26989bcf2919a89c6621b6af8723e17c2272f8a4a0f8b3ba02a253747e518022" exitCode=0 Oct 06 21:51:22 crc kubenswrapper[5014]: I1006 21:51:22.689515 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"26989bcf2919a89c6621b6af8723e17c2272f8a4a0f8b3ba02a253747e518022"} Oct 06 21:51:22 crc kubenswrapper[5014]: I1006 21:51:22.689542 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" 
event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"cbdf259ebb7c14fd9e76d97e1e3dfaef9e8de4a47de87f8694acba8d0b7b3bc4"} Oct 06 21:51:22 crc kubenswrapper[5014]: I1006 21:51:22.689560 5014 scope.go:117] "RemoveContainer" containerID="067aa2b7990476eda8f037f84b90fe4366def589071101170672b653a6809121" Oct 06 21:51:22 crc kubenswrapper[5014]: I1006 21:51:22.699165 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2f141e4f-d87c-41df-bb55-5a0cd723ae0b","Type":"ContainerStarted","Data":"0aa631a9526ed6a862997049466c2018a288111e31e5ce1889371bfd7dabfdf9"} Oct 06 21:51:22 crc kubenswrapper[5014]: I1006 21:51:22.702463 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.480352309 podStartE2EDuration="6.702445261s" podCreationTimestamp="2025-10-06 21:51:16 +0000 UTC" firstStartedPulling="2025-10-06 21:51:17.902958926 +0000 UTC m=+1223.195995660" lastFinishedPulling="2025-10-06 21:51:21.125051858 +0000 UTC m=+1226.418088612" observedRunningTime="2025-10-06 21:51:22.699993193 +0000 UTC m=+1227.993029927" watchObservedRunningTime="2025-10-06 21:51:22.702445261 +0000 UTC m=+1227.995481995" Oct 06 21:51:22 crc kubenswrapper[5014]: I1006 21:51:22.731257 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.375153932 podStartE2EDuration="6.731236964s" podCreationTimestamp="2025-10-06 21:51:16 +0000 UTC" firstStartedPulling="2025-10-06 21:51:17.768923794 +0000 UTC m=+1223.061960528" lastFinishedPulling="2025-10-06 21:51:21.125006826 +0000 UTC m=+1226.418043560" observedRunningTime="2025-10-06 21:51:22.719362368 +0000 UTC m=+1228.012399102" watchObservedRunningTime="2025-10-06 21:51:22.731236964 +0000 UTC m=+1228.024273698" Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.313725 5014 util.go:48] "No ready sandbox for pod can be found. 
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.313725 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.445458 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87709e2f-0587-47af-8c38-99a845d9de28-config-data\") pod \"87709e2f-0587-47af-8c38-99a845d9de28\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") "
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.445557 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87709e2f-0587-47af-8c38-99a845d9de28-combined-ca-bundle\") pod \"87709e2f-0587-47af-8c38-99a845d9de28\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") "
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.445577 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87709e2f-0587-47af-8c38-99a845d9de28-logs\") pod \"87709e2f-0587-47af-8c38-99a845d9de28\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") "
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.445649 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjsnj\" (UniqueName: \"kubernetes.io/projected/87709e2f-0587-47af-8c38-99a845d9de28-kube-api-access-sjsnj\") pod \"87709e2f-0587-47af-8c38-99a845d9de28\" (UID: \"87709e2f-0587-47af-8c38-99a845d9de28\") "
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.445939 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87709e2f-0587-47af-8c38-99a845d9de28-logs" (OuterVolumeSpecName: "logs") pod "87709e2f-0587-47af-8c38-99a845d9de28" (UID: "87709e2f-0587-47af-8c38-99a845d9de28"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.446475 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87709e2f-0587-47af-8c38-99a845d9de28-logs\") on node \"crc\" DevicePath \"\""
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.455931 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87709e2f-0587-47af-8c38-99a845d9de28-kube-api-access-sjsnj" (OuterVolumeSpecName: "kube-api-access-sjsnj") pod "87709e2f-0587-47af-8c38-99a845d9de28" (UID: "87709e2f-0587-47af-8c38-99a845d9de28"). InnerVolumeSpecName "kube-api-access-sjsnj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.482805 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87709e2f-0587-47af-8c38-99a845d9de28-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "87709e2f-0587-47af-8c38-99a845d9de28" (UID: "87709e2f-0587-47af-8c38-99a845d9de28"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.486680 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87709e2f-0587-47af-8c38-99a845d9de28-config-data" (OuterVolumeSpecName: "config-data") pod "87709e2f-0587-47af-8c38-99a845d9de28" (UID: "87709e2f-0587-47af-8c38-99a845d9de28"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.548041 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87709e2f-0587-47af-8c38-99a845d9de28-config-data\") on node \"crc\" DevicePath \"\""
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.548072 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87709e2f-0587-47af-8c38-99a845d9de28-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.548085 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjsnj\" (UniqueName: \"kubernetes.io/projected/87709e2f-0587-47af-8c38-99a845d9de28-kube-api-access-sjsnj\") on node \"crc\" DevicePath \"\""
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.708359 5014 generic.go:334] "Generic (PLEG): container finished" podID="87709e2f-0587-47af-8c38-99a845d9de28" containerID="d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c" exitCode=0
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.708392 5014 generic.go:334] "Generic (PLEG): container finished" podID="87709e2f-0587-47af-8c38-99a845d9de28" containerID="38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c" exitCode=143
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.708427 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"87709e2f-0587-47af-8c38-99a845d9de28","Type":"ContainerDied","Data":"d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c"}
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.708451 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"87709e2f-0587-47af-8c38-99a845d9de28","Type":"ContainerDied","Data":"38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c"}
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.708460 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"87709e2f-0587-47af-8c38-99a845d9de28","Type":"ContainerDied","Data":"25cb8b8be0be9587cb2d469a7ef330e3f5b3aad503039f54ecc32a3284e480b7"}
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.708474 5014 scope.go:117] "RemoveContainer" containerID="d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.708566 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.739158 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.749257 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.751579 5014 scope.go:117] "RemoveContainer" containerID="38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.775072 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Oct 06 21:51:23 crc kubenswrapper[5014]: E1006 21:51:23.775759 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87709e2f-0587-47af-8c38-99a845d9de28" containerName="nova-metadata-metadata"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.775771 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="87709e2f-0587-47af-8c38-99a845d9de28" containerName="nova-metadata-metadata"
Oct 06 21:51:23 crc kubenswrapper[5014]: E1006 21:51:23.775819 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87709e2f-0587-47af-8c38-99a845d9de28" containerName="nova-metadata-log"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.775826 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="87709e2f-0587-47af-8c38-99a845d9de28" containerName="nova-metadata-log"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.776006 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="87709e2f-0587-47af-8c38-99a845d9de28" containerName="nova-metadata-log"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.776031 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="87709e2f-0587-47af-8c38-99a845d9de28" containerName="nova-metadata-metadata"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.777009 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.781899 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.781983 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.785302 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.785825 5014 scope.go:117] "RemoveContainer" containerID="d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c"
Oct 06 21:51:23 crc kubenswrapper[5014]: E1006 21:51:23.787597 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c\": container with ID starting with d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c not found: ID does not exist" containerID="d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.787648 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c"} err="failed to get container status \"d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c\": rpc error: code = NotFound desc = could not find container \"d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c\": container with ID starting with d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c not found: ID does not exist"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.787671 5014 scope.go:117] "RemoveContainer" containerID="38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c"
Oct 06 21:51:23 crc kubenswrapper[5014]: E1006 21:51:23.790918 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c\": container with ID starting with 38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c not found: ID does not exist" containerID="38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.790938 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c"} err="failed to get container status \"38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c\": rpc error: code = NotFound desc = could not find container \"38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c\": container with ID starting with 38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c not found: ID does not exist"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.790954 5014 scope.go:117] "RemoveContainer" containerID="d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.792172 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c"} err="failed to get container status \"d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c\": rpc error: code = NotFound desc = could not find container \"d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c\": container with ID starting with d5a8bebdfe18839c344e8529015935fb4790feca91f9d660eada7d05f0ef140c not found: ID does not exist"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.792220 5014 scope.go:117] "RemoveContainer" containerID="38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.795862 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c"} err="failed to get container status \"38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c\": rpc error: code = NotFound desc = could not find container \"38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c\": container with ID starting with 38548ffc40cd69c4457c7b1583585fc43b029e42af9a457dd4b3f3c62956fd3c not found: ID does not exist"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.854179 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " pod="openstack/nova-metadata-0"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.854562 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-config-data\") pod \"nova-metadata-0\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " pod="openstack/nova-metadata-0"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.854714 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5v78\" (UniqueName: \"kubernetes.io/projected/f60e8449-4af3-4cac-a1a5-85a007ea5279-kube-api-access-b5v78\") pod \"nova-metadata-0\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " pod="openstack/nova-metadata-0"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.854841 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f60e8449-4af3-4cac-a1a5-85a007ea5279-logs\") pod \"nova-metadata-0\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " pod="openstack/nova-metadata-0"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.855196 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " pod="openstack/nova-metadata-0"
Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.957920 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-config-data\") pod \"nova-metadata-0\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " pod="openstack/nova-metadata-0"
\"kubernetes.io/projected/f60e8449-4af3-4cac-a1a5-85a007ea5279-kube-api-access-b5v78\") pod \"nova-metadata-0\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " pod="openstack/nova-metadata-0" Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.958013 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f60e8449-4af3-4cac-a1a5-85a007ea5279-logs\") pod \"nova-metadata-0\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " pod="openstack/nova-metadata-0" Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.958136 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " pod="openstack/nova-metadata-0" Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.958169 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " pod="openstack/nova-metadata-0" Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.958664 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f60e8449-4af3-4cac-a1a5-85a007ea5279-logs\") pod \"nova-metadata-0\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " pod="openstack/nova-metadata-0" Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.963336 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-config-data\") pod \"nova-metadata-0\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " pod="openstack/nova-metadata-0" Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.964278 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " pod="openstack/nova-metadata-0" Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.964729 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " pod="openstack/nova-metadata-0" Oct 06 21:51:23 crc kubenswrapper[5014]: I1006 21:51:23.977924 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5v78\" (UniqueName: \"kubernetes.io/projected/f60e8449-4af3-4cac-a1a5-85a007ea5279-kube-api-access-b5v78\") pod \"nova-metadata-0\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " pod="openstack/nova-metadata-0" Oct 06 21:51:24 crc kubenswrapper[5014]: I1006 21:51:24.117888 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 06 21:51:24 crc kubenswrapper[5014]: I1006 21:51:24.613944 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:51:24 crc kubenswrapper[5014]: W1006 21:51:24.621414 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf60e8449_4af3_4cac_a1a5_85a007ea5279.slice/crio-b44c1e80d0582ecfb467ee20cc05c14ba7697401a9d612c4be5bee9aa438344f WatchSource:0}: Error finding container b44c1e80d0582ecfb467ee20cc05c14ba7697401a9d612c4be5bee9aa438344f: Status 404 returned error can't find the container with id b44c1e80d0582ecfb467ee20cc05c14ba7697401a9d612c4be5bee9aa438344f Oct 06 21:51:24 crc kubenswrapper[5014]: I1006 21:51:24.732850 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f60e8449-4af3-4cac-a1a5-85a007ea5279","Type":"ContainerStarted","Data":"b44c1e80d0582ecfb467ee20cc05c14ba7697401a9d612c4be5bee9aa438344f"} Oct 06 21:51:25 crc kubenswrapper[5014]: I1006 21:51:25.507900 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87709e2f-0587-47af-8c38-99a845d9de28" path="/var/lib/kubelet/pods/87709e2f-0587-47af-8c38-99a845d9de28/volumes" Oct 06 21:51:25 crc kubenswrapper[5014]: I1006 21:51:25.745460 5014 generic.go:334] "Generic (PLEG): container finished" podID="b8f0d160-17d7-4876-925a-93b29c26847a" containerID="e0108697276545e2fd0df481de9931a00ca8d8ecc01d4199b56e5ce3ba02591e" exitCode=0 Oct 06 21:51:25 crc kubenswrapper[5014]: I1006 21:51:25.745526 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bhl92" event={"ID":"b8f0d160-17d7-4876-925a-93b29c26847a","Type":"ContainerDied","Data":"e0108697276545e2fd0df481de9931a00ca8d8ecc01d4199b56e5ce3ba02591e"} Oct 06 21:51:25 crc kubenswrapper[5014]: I1006 21:51:25.747795 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f60e8449-4af3-4cac-a1a5-85a007ea5279","Type":"ContainerStarted","Data":"82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52"} Oct 06 21:51:25 crc kubenswrapper[5014]: I1006 21:51:25.747817 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f60e8449-4af3-4cac-a1a5-85a007ea5279","Type":"ContainerStarted","Data":"670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7"} Oct 06 21:51:25 crc kubenswrapper[5014]: I1006 21:51:25.792124 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.792099311 podStartE2EDuration="2.792099311s" podCreationTimestamp="2025-10-06 21:51:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:51:25.784790599 +0000 UTC m=+1231.077827333" watchObservedRunningTime="2025-10-06 21:51:25.792099311 +0000 UTC m=+1231.085136035" Oct 06 21:51:26 crc kubenswrapper[5014]: I1006 21:51:26.660056 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.099477 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.107807 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.128086 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.128143 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.139664 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.247966 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqznq\" (UniqueName: \"kubernetes.io/projected/b8f0d160-17d7-4876-925a-93b29c26847a-kube-api-access-bqznq\") pod \"b8f0d160-17d7-4876-925a-93b29c26847a\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.248023 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-combined-ca-bundle\") pod \"b8f0d160-17d7-4876-925a-93b29c26847a\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.248156 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-config-data\") pod \"b8f0d160-17d7-4876-925a-93b29c26847a\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.248307 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-scripts\") pod \"b8f0d160-17d7-4876-925a-93b29c26847a\" (UID: \"b8f0d160-17d7-4876-925a-93b29c26847a\") " Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.255729 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-scripts" (OuterVolumeSpecName: "scripts") pod "b8f0d160-17d7-4876-925a-93b29c26847a" (UID: "b8f0d160-17d7-4876-925a-93b29c26847a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.258787 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8f0d160-17d7-4876-925a-93b29c26847a-kube-api-access-bqznq" (OuterVolumeSpecName: "kube-api-access-bqznq") pod "b8f0d160-17d7-4876-925a-93b29c26847a" (UID: "b8f0d160-17d7-4876-925a-93b29c26847a"). InnerVolumeSpecName "kube-api-access-bqznq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.287219 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-config-data" (OuterVolumeSpecName: "config-data") pod "b8f0d160-17d7-4876-925a-93b29c26847a" (UID: "b8f0d160-17d7-4876-925a-93b29c26847a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.300979 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b8f0d160-17d7-4876-925a-93b29c26847a" (UID: "b8f0d160-17d7-4876-925a-93b29c26847a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.350610 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.350683 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.350694 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqznq\" (UniqueName: \"kubernetes.io/projected/b8f0d160-17d7-4876-925a-93b29c26847a-kube-api-access-bqznq\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.350706 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8f0d160-17d7-4876-925a-93b29c26847a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.415794 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.481412 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59b9656b65-wz49g"] Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.481675 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-59b9656b65-wz49g" podUID="92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8" containerName="dnsmasq-dns" containerID="cri-o://7941c65573392cc10a3090841b4d9629ca34fea0c3ee8c74bf69e96d3b8ee435" gracePeriod=10 Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.767808 5014 generic.go:334] "Generic (PLEG): container finished" podID="92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8" containerID="7941c65573392cc10a3090841b4d9629ca34fea0c3ee8c74bf69e96d3b8ee435" exitCode=0 Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.767872 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59b9656b65-wz49g" event={"ID":"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8","Type":"ContainerDied","Data":"7941c65573392cc10a3090841b4d9629ca34fea0c3ee8c74bf69e96d3b8ee435"} Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.769752 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bhl92" event={"ID":"b8f0d160-17d7-4876-925a-93b29c26847a","Type":"ContainerDied","Data":"705df94622979094e1764546de1ee7d1ce2082012f14ff80223747bb7dc61fd1"} Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.769777 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="705df94622979094e1764546de1ee7d1ce2082012f14ff80223747bb7dc61fd1" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.769785 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bhl92" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.771542 5014 generic.go:334] "Generic (PLEG): container finished" podID="c5a46c2b-6455-4103-b35c-db8e3301d1e9" containerID="7943ac158fc9c82b6786736f3eafcf3a5d3ac1281259d04c7563c98199d84d1d" exitCode=0 Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.772397 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-wnhrf" event={"ID":"c5a46c2b-6455-4103-b35c-db8e3301d1e9","Type":"ContainerDied","Data":"7943ac158fc9c82b6786736f3eafcf3a5d3ac1281259d04c7563c98199d84d1d"} Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.824866 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.912236 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.960866 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hmmct\" (UniqueName: \"kubernetes.io/projected/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-kube-api-access-hmmct\") pod \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.961006 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-ovsdbserver-nb\") pod \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.961034 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-ovsdbserver-sb\") pod \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.961692 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-dns-svc\") pod \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.961748 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-dns-swift-storage-0\") pod \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.961818 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-config\") pod \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\" (UID: \"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8\") " Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.966806 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-kube-api-access-hmmct" (OuterVolumeSpecName: "kube-api-access-hmmct") pod "92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8" (UID: "92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8"). InnerVolumeSpecName "kube-api-access-hmmct". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.970456 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.971266 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2f141e4f-d87c-41df-bb55-5a0cd723ae0b" containerName="nova-api-api" containerID="cri-o://0aa631a9526ed6a862997049466c2018a288111e31e5ce1889371bfd7dabfdf9" gracePeriod=30 Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.971371 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2f141e4f-d87c-41df-bb55-5a0cd723ae0b" containerName="nova-api-log" containerID="cri-o://5387587dd2e9ecece03a233025f6a291123cee61b607fbfaa8b934939fbda252" gracePeriod=30 Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.978976 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2f141e4f-d87c-41df-bb55-5a0cd723ae0b" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.186:8774/\": EOF" Oct 06 21:51:27 crc kubenswrapper[5014]: I1006 21:51:27.979030 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2f141e4f-d87c-41df-bb55-5a0cd723ae0b" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.186:8774/\": EOF" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.053050 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.053463 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="f60e8449-4af3-4cac-a1a5-85a007ea5279" containerName="nova-metadata-log" containerID="cri-o://670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7" gracePeriod=30 Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.054146 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="f60e8449-4af3-4cac-a1a5-85a007ea5279" containerName="nova-metadata-metadata" containerID="cri-o://82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52" gracePeriod=30 Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.064699 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hmmct\" (UniqueName: \"kubernetes.io/projected/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-kube-api-access-hmmct\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.086572 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-config" (OuterVolumeSpecName: "config") pod "92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8" (UID: "92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.111505 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8" (UID: "92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.128226 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8" (UID: "92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.156406 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8" (UID: "92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.171849 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.171882 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.171892 5014 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.171900 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.194426 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8" (UID: "92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.274037 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.325962 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.583977 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.680067 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f60e8449-4af3-4cac-a1a5-85a007ea5279-logs\") pod \"f60e8449-4af3-4cac-a1a5-85a007ea5279\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.680289 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-nova-metadata-tls-certs\") pod \"f60e8449-4af3-4cac-a1a5-85a007ea5279\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.680310 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-config-data\") pod \"f60e8449-4af3-4cac-a1a5-85a007ea5279\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.680339 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-combined-ca-bundle\") pod \"f60e8449-4af3-4cac-a1a5-85a007ea5279\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.680446 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5v78\" (UniqueName: \"kubernetes.io/projected/f60e8449-4af3-4cac-a1a5-85a007ea5279-kube-api-access-b5v78\") pod \"f60e8449-4af3-4cac-a1a5-85a007ea5279\" (UID: \"f60e8449-4af3-4cac-a1a5-85a007ea5279\") " Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.681533 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f60e8449-4af3-4cac-a1a5-85a007ea5279-logs" (OuterVolumeSpecName: "logs") pod "f60e8449-4af3-4cac-a1a5-85a007ea5279" (UID: "f60e8449-4af3-4cac-a1a5-85a007ea5279"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.690167 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f60e8449-4af3-4cac-a1a5-85a007ea5279-kube-api-access-b5v78" (OuterVolumeSpecName: "kube-api-access-b5v78") pod "f60e8449-4af3-4cac-a1a5-85a007ea5279" (UID: "f60e8449-4af3-4cac-a1a5-85a007ea5279"). InnerVolumeSpecName "kube-api-access-b5v78". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.715898 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f60e8449-4af3-4cac-a1a5-85a007ea5279" (UID: "f60e8449-4af3-4cac-a1a5-85a007ea5279"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.726653 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-config-data" (OuterVolumeSpecName: "config-data") pod "f60e8449-4af3-4cac-a1a5-85a007ea5279" (UID: "f60e8449-4af3-4cac-a1a5-85a007ea5279"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.743312 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "f60e8449-4af3-4cac-a1a5-85a007ea5279" (UID: "f60e8449-4af3-4cac-a1a5-85a007ea5279"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.782351 5014 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.782388 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.782399 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f60e8449-4af3-4cac-a1a5-85a007ea5279-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.782409 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5v78\" (UniqueName: \"kubernetes.io/projected/f60e8449-4af3-4cac-a1a5-85a007ea5279-kube-api-access-b5v78\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.782418 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f60e8449-4af3-4cac-a1a5-85a007ea5279-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.783176 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-59b9656b65-wz49g" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.789446 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59b9656b65-wz49g" event={"ID":"92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8","Type":"ContainerDied","Data":"8aa827338ce58ab0b3b7b862bc4a3dd8dc38fb2af9c230977528ab0ca3bb22f6"} Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.789528 5014 scope.go:117] "RemoveContainer" containerID="7941c65573392cc10a3090841b4d9629ca34fea0c3ee8c74bf69e96d3b8ee435" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.798310 5014 generic.go:334] "Generic (PLEG): container finished" podID="2f141e4f-d87c-41df-bb55-5a0cd723ae0b" containerID="5387587dd2e9ecece03a233025f6a291123cee61b607fbfaa8b934939fbda252" exitCode=143 Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.798392 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2f141e4f-d87c-41df-bb55-5a0cd723ae0b","Type":"ContainerDied","Data":"5387587dd2e9ecece03a233025f6a291123cee61b607fbfaa8b934939fbda252"} Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.802871 5014 generic.go:334] "Generic (PLEG): container finished" podID="f60e8449-4af3-4cac-a1a5-85a007ea5279" containerID="82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52" exitCode=0 Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.802906 5014 generic.go:334] "Generic (PLEG): container finished" podID="f60e8449-4af3-4cac-a1a5-85a007ea5279" containerID="670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7" exitCode=143 Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.803505 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.803698 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f60e8449-4af3-4cac-a1a5-85a007ea5279","Type":"ContainerDied","Data":"82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52"} Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.803757 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f60e8449-4af3-4cac-a1a5-85a007ea5279","Type":"ContainerDied","Data":"670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7"} Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.803770 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f60e8449-4af3-4cac-a1a5-85a007ea5279","Type":"ContainerDied","Data":"b44c1e80d0582ecfb467ee20cc05c14ba7697401a9d612c4be5bee9aa438344f"} Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.816847 5014 scope.go:117] "RemoveContainer" containerID="105d7977d75a49d72314a402e90a068ea4d2d435cc3f786e02d4fc6e0dbe59c6" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.824006 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59b9656b65-wz49g"] Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.833996 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-59b9656b65-wz49g"] Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.846269 5014 scope.go:117] "RemoveContainer" containerID="82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.852954 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:51:28 
crc kubenswrapper[5014]: I1006 21:51:28.870436 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.883347 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:51:28 crc kubenswrapper[5014]: E1006 21:51:28.884022 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8" containerName="init" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.884039 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8" containerName="init" Oct 06 21:51:28 crc kubenswrapper[5014]: E1006 21:51:28.884056 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8" containerName="dnsmasq-dns" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.884062 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8" containerName="dnsmasq-dns" Oct 06 21:51:28 crc kubenswrapper[5014]: E1006 21:51:28.884076 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f60e8449-4af3-4cac-a1a5-85a007ea5279" containerName="nova-metadata-metadata" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.884082 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="f60e8449-4af3-4cac-a1a5-85a007ea5279" containerName="nova-metadata-metadata" Oct 06 21:51:28 crc kubenswrapper[5014]: E1006 21:51:28.884091 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f60e8449-4af3-4cac-a1a5-85a007ea5279" containerName="nova-metadata-log" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.884098 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="f60e8449-4af3-4cac-a1a5-85a007ea5279" containerName="nova-metadata-log" Oct 06 21:51:28 crc kubenswrapper[5014]: E1006 21:51:28.884126 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8f0d160-17d7-4876-925a-93b29c26847a" containerName="nova-manage" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.884131 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8f0d160-17d7-4876-925a-93b29c26847a" containerName="nova-manage" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.884306 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8" containerName="dnsmasq-dns" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.884319 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="f60e8449-4af3-4cac-a1a5-85a007ea5279" containerName="nova-metadata-log" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.884330 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8f0d160-17d7-4876-925a-93b29c26847a" containerName="nova-manage" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.884341 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="f60e8449-4af3-4cac-a1a5-85a007ea5279" containerName="nova-metadata-metadata" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.885538 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.888722 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.888926 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.892814 5014 scope.go:117] "RemoveContainer" containerID="670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.913453 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.941393 5014 scope.go:117] "RemoveContainer" containerID="82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52" Oct 06 21:51:28 crc kubenswrapper[5014]: E1006 21:51:28.942180 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52\": container with ID starting with 82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52 not found: ID does not exist" containerID="82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.942239 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52"} err="failed to get container status \"82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52\": rpc error: code = NotFound desc = could not find container \"82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52\": container with ID starting with 82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52 not found: ID does not exist" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.942278 5014 scope.go:117] "RemoveContainer" containerID="670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7" Oct 06 21:51:28 crc kubenswrapper[5014]: E1006 21:51:28.942783 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7\": container with ID starting with 670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7 not found: ID does not exist" containerID="670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.942847 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7"} err="failed to get container status \"670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7\": rpc error: code = NotFound desc = could not find container \"670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7\": container with ID starting with 670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7 not found: ID does not exist" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.942895 5014 scope.go:117] "RemoveContainer" containerID="82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.943434 5014 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52"} err="failed to get container status \"82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52\": rpc error: code = NotFound desc = could not find container \"82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52\": container with ID starting with 82f8a24fe3aee8727e594b6bedf5457cc0436723bb2c66ae60e721fc1c73ce52 not found: ID does not exist" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.943455 5014 scope.go:117] "RemoveContainer" containerID="670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.943804 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7"} err="failed to get container status \"670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7\": rpc error: code = NotFound desc = could not find container \"670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7\": container with ID starting with 670d7e9fd05e6ce878fed0eb1f9ea0adfbed1b5cb9efed41dc87587bbd6e69b7 not found: ID does not exist" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.986730 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " pod="openstack/nova-metadata-0" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.986783 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " pod="openstack/nova-metadata-0" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.986875 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8585acea-0fdc-4307-b39c-f98d2d50f03b-logs\") pod \"nova-metadata-0\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " pod="openstack/nova-metadata-0" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.986902 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7hpc\" (UniqueName: \"kubernetes.io/projected/8585acea-0fdc-4307-b39c-f98d2d50f03b-kube-api-access-j7hpc\") pod \"nova-metadata-0\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " pod="openstack/nova-metadata-0" Oct 06 21:51:28 crc kubenswrapper[5014]: I1006 21:51:28.986956 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-config-data\") pod \"nova-metadata-0\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " pod="openstack/nova-metadata-0" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.092527 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " pod="openstack/nova-metadata-0" Oct 06 21:51:29 crc 
kubenswrapper[5014]: I1006 21:51:29.092847 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " pod="openstack/nova-metadata-0" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.095569 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8585acea-0fdc-4307-b39c-f98d2d50f03b-logs\") pod \"nova-metadata-0\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " pod="openstack/nova-metadata-0" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.095640 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7hpc\" (UniqueName: \"kubernetes.io/projected/8585acea-0fdc-4307-b39c-f98d2d50f03b-kube-api-access-j7hpc\") pod \"nova-metadata-0\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " pod="openstack/nova-metadata-0" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.095773 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-config-data\") pod \"nova-metadata-0\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " pod="openstack/nova-metadata-0" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.097435 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8585acea-0fdc-4307-b39c-f98d2d50f03b-logs\") pod \"nova-metadata-0\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " pod="openstack/nova-metadata-0" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.097906 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " pod="openstack/nova-metadata-0" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.098051 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " pod="openstack/nova-metadata-0" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.101306 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-config-data\") pod \"nova-metadata-0\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " pod="openstack/nova-metadata-0" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.115156 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7hpc\" (UniqueName: \"kubernetes.io/projected/8585acea-0fdc-4307-b39c-f98d2d50f03b-kube-api-access-j7hpc\") pod \"nova-metadata-0\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " pod="openstack/nova-metadata-0" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.205951 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.214425 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.299584 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-config-data\") pod \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.299697 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mj652\" (UniqueName: \"kubernetes.io/projected/c5a46c2b-6455-4103-b35c-db8e3301d1e9-kube-api-access-mj652\") pod \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.299779 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-combined-ca-bundle\") pod \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.299884 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-scripts\") pod \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\" (UID: \"c5a46c2b-6455-4103-b35c-db8e3301d1e9\") " Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.304786 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-scripts" (OuterVolumeSpecName: "scripts") pod "c5a46c2b-6455-4103-b35c-db8e3301d1e9" (UID: "c5a46c2b-6455-4103-b35c-db8e3301d1e9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.308803 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5a46c2b-6455-4103-b35c-db8e3301d1e9-kube-api-access-mj652" (OuterVolumeSpecName: "kube-api-access-mj652") pod "c5a46c2b-6455-4103-b35c-db8e3301d1e9" (UID: "c5a46c2b-6455-4103-b35c-db8e3301d1e9"). InnerVolumeSpecName "kube-api-access-mj652". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.347785 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c5a46c2b-6455-4103-b35c-db8e3301d1e9" (UID: "c5a46c2b-6455-4103-b35c-db8e3301d1e9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.350052 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-config-data" (OuterVolumeSpecName: "config-data") pod "c5a46c2b-6455-4103-b35c-db8e3301d1e9" (UID: "c5a46c2b-6455-4103-b35c-db8e3301d1e9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.402509 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.402539 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mj652\" (UniqueName: \"kubernetes.io/projected/c5a46c2b-6455-4103-b35c-db8e3301d1e9-kube-api-access-mj652\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.402550 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.402558 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5a46c2b-6455-4103-b35c-db8e3301d1e9-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.502713 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8" path="/var/lib/kubelet/pods/92468d0e-94cb-4ca5-ad93-f8d6e7b9e2a8/volumes" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.503911 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f60e8449-4af3-4cac-a1a5-85a007ea5279" path="/var/lib/kubelet/pods/f60e8449-4af3-4cac-a1a5-85a007ea5279/volumes" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.673942 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.844248 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-wnhrf" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.844279 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-wnhrf" event={"ID":"c5a46c2b-6455-4103-b35c-db8e3301d1e9","Type":"ContainerDied","Data":"5a70ee710d527bec7931f172c4ac3d7c8fb4da89141ed227c9b1e4d54c193257"} Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.844319 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a70ee710d527bec7931f172c4ac3d7c8fb4da89141ed227c9b1e4d54c193257" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.858348 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8585acea-0fdc-4307-b39c-f98d2d50f03b","Type":"ContainerStarted","Data":"3cc32cc11df57713d2f0c869d7991655573a9987934687034e1040719c0f4388"} Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.858403 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8585acea-0fdc-4307-b39c-f98d2d50f03b","Type":"ContainerStarted","Data":"76ac17fa038545a5a76ef4bc4107a7b4c1f5e6b4a6c3a631b9fd2cb34902e80f"} Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.866891 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="469ba97a-f5a9-4088-905b-6bb38bd12f25" containerName="nova-scheduler-scheduler" containerID="cri-o://add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2" gracePeriod=30 Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.883647 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Oct 06 21:51:29 crc kubenswrapper[5014]: E1006 21:51:29.884074 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5a46c2b-6455-4103-b35c-db8e3301d1e9" containerName="nova-cell1-conductor-db-sync" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.884091 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5a46c2b-6455-4103-b35c-db8e3301d1e9" containerName="nova-cell1-conductor-db-sync" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.884294 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5a46c2b-6455-4103-b35c-db8e3301d1e9" containerName="nova-cell1-conductor-db-sync" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.884967 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.902341 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.916984 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7de90fe7-747a-4334-be2a-d3b5ee6b8148-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"7de90fe7-747a-4334-be2a-d3b5ee6b8148\") " pod="openstack/nova-cell1-conductor-0" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.917068 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7de90fe7-747a-4334-be2a-d3b5ee6b8148-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"7de90fe7-747a-4334-be2a-d3b5ee6b8148\") " pod="openstack/nova-cell1-conductor-0" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.917144 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6bmz\" (UniqueName: \"kubernetes.io/projected/7de90fe7-747a-4334-be2a-d3b5ee6b8148-kube-api-access-p6bmz\") pod \"nova-cell1-conductor-0\" (UID: \"7de90fe7-747a-4334-be2a-d3b5ee6b8148\") " pod="openstack/nova-cell1-conductor-0" Oct 06 21:51:29 crc kubenswrapper[5014]: I1006 21:51:29.927172 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Oct 06 21:51:30 crc kubenswrapper[5014]: I1006 21:51:30.019018 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7de90fe7-747a-4334-be2a-d3b5ee6b8148-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"7de90fe7-747a-4334-be2a-d3b5ee6b8148\") " pod="openstack/nova-cell1-conductor-0" Oct 06 21:51:30 crc kubenswrapper[5014]: I1006 21:51:30.019100 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6bmz\" (UniqueName: \"kubernetes.io/projected/7de90fe7-747a-4334-be2a-d3b5ee6b8148-kube-api-access-p6bmz\") pod \"nova-cell1-conductor-0\" (UID: \"7de90fe7-747a-4334-be2a-d3b5ee6b8148\") " pod="openstack/nova-cell1-conductor-0" Oct 06 21:51:30 crc kubenswrapper[5014]: I1006 21:51:30.019266 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7de90fe7-747a-4334-be2a-d3b5ee6b8148-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"7de90fe7-747a-4334-be2a-d3b5ee6b8148\") " pod="openstack/nova-cell1-conductor-0" Oct 06 21:51:30 crc kubenswrapper[5014]: I1006 21:51:30.024176 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7de90fe7-747a-4334-be2a-d3b5ee6b8148-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"7de90fe7-747a-4334-be2a-d3b5ee6b8148\") " pod="openstack/nova-cell1-conductor-0" Oct 06 21:51:30 crc kubenswrapper[5014]: I1006 21:51:30.032256 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7de90fe7-747a-4334-be2a-d3b5ee6b8148-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"7de90fe7-747a-4334-be2a-d3b5ee6b8148\") " pod="openstack/nova-cell1-conductor-0" Oct 06 21:51:30 crc kubenswrapper[5014]: I1006 21:51:30.041232 5014 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6bmz\" (UniqueName: \"kubernetes.io/projected/7de90fe7-747a-4334-be2a-d3b5ee6b8148-kube-api-access-p6bmz\") pod \"nova-cell1-conductor-0\" (UID: \"7de90fe7-747a-4334-be2a-d3b5ee6b8148\") " pod="openstack/nova-cell1-conductor-0" Oct 06 21:51:30 crc kubenswrapper[5014]: I1006 21:51:30.230828 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Oct 06 21:51:30 crc kubenswrapper[5014]: I1006 21:51:30.702127 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Oct 06 21:51:30 crc kubenswrapper[5014]: I1006 21:51:30.880146 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"7de90fe7-747a-4334-be2a-d3b5ee6b8148","Type":"ContainerStarted","Data":"5a7f15bf6b0fcbdd2b4dfd00fd05e8dfa2b0f035fdab7de942b9739ae9111317"} Oct 06 21:51:30 crc kubenswrapper[5014]: I1006 21:51:30.882865 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8585acea-0fdc-4307-b39c-f98d2d50f03b","Type":"ContainerStarted","Data":"1e6fbd1229c040b8597fa85ce50f7ab2492b64339a12b813195106004bd67cf2"} Oct 06 21:51:30 crc kubenswrapper[5014]: I1006 21:51:30.909684 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.909661201 podStartE2EDuration="2.909661201s" podCreationTimestamp="2025-10-06 21:51:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:51:30.902995046 +0000 UTC m=+1236.196031780" watchObservedRunningTime="2025-10-06 21:51:30.909661201 +0000 UTC m=+1236.202697935" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.255589 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.256128 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="81112a98-d817-4bf4-bb1e-288cb62e8577" containerName="kube-state-metrics" containerID="cri-o://96444f2a32fb59138cfc1e622d46a7508491a379c1bf7d6e3ed57a0b844c47c6" gracePeriod=30 Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.748084 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.860181 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqfmm\" (UniqueName: \"kubernetes.io/projected/81112a98-d817-4bf4-bb1e-288cb62e8577-kube-api-access-nqfmm\") pod \"81112a98-d817-4bf4-bb1e-288cb62e8577\" (UID: \"81112a98-d817-4bf4-bb1e-288cb62e8577\") " Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.866179 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81112a98-d817-4bf4-bb1e-288cb62e8577-kube-api-access-nqfmm" (OuterVolumeSpecName: "kube-api-access-nqfmm") pod "81112a98-d817-4bf4-bb1e-288cb62e8577" (UID: "81112a98-d817-4bf4-bb1e-288cb62e8577"). InnerVolumeSpecName "kube-api-access-nqfmm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.894063 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"7de90fe7-747a-4334-be2a-d3b5ee6b8148","Type":"ContainerStarted","Data":"3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370"} Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.894754 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.896681 5014 generic.go:334] "Generic (PLEG): container finished" podID="81112a98-d817-4bf4-bb1e-288cb62e8577" containerID="96444f2a32fb59138cfc1e622d46a7508491a379c1bf7d6e3ed57a0b844c47c6" exitCode=2 Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.896735 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"81112a98-d817-4bf4-bb1e-288cb62e8577","Type":"ContainerDied","Data":"96444f2a32fb59138cfc1e622d46a7508491a379c1bf7d6e3ed57a0b844c47c6"} Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.896783 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"81112a98-d817-4bf4-bb1e-288cb62e8577","Type":"ContainerDied","Data":"f1220b7365b3ea434ec51cf42b3defeecd4c4fdd220439a9793ea3b746225024"} Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.896805 5014 scope.go:117] "RemoveContainer" containerID="96444f2a32fb59138cfc1e622d46a7508491a379c1bf7d6e3ed57a0b844c47c6" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.897119 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.921307 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.921281905 podStartE2EDuration="2.921281905s" podCreationTimestamp="2025-10-06 21:51:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:51:31.914875679 +0000 UTC m=+1237.207912413" watchObservedRunningTime="2025-10-06 21:51:31.921281905 +0000 UTC m=+1237.214318639" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.930873 5014 scope.go:117] "RemoveContainer" containerID="96444f2a32fb59138cfc1e622d46a7508491a379c1bf7d6e3ed57a0b844c47c6" Oct 06 21:51:31 crc kubenswrapper[5014]: E1006 21:51:31.931462 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96444f2a32fb59138cfc1e622d46a7508491a379c1bf7d6e3ed57a0b844c47c6\": container with ID starting with 96444f2a32fb59138cfc1e622d46a7508491a379c1bf7d6e3ed57a0b844c47c6 not found: ID does not exist" containerID="96444f2a32fb59138cfc1e622d46a7508491a379c1bf7d6e3ed57a0b844c47c6" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.931499 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96444f2a32fb59138cfc1e622d46a7508491a379c1bf7d6e3ed57a0b844c47c6"} err="failed to get container status \"96444f2a32fb59138cfc1e622d46a7508491a379c1bf7d6e3ed57a0b844c47c6\": rpc error: code = NotFound desc = could not find container \"96444f2a32fb59138cfc1e622d46a7508491a379c1bf7d6e3ed57a0b844c47c6\": container with ID starting with 96444f2a32fb59138cfc1e622d46a7508491a379c1bf7d6e3ed57a0b844c47c6 not found: ID does not 
exist" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.953520 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.962729 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqfmm\" (UniqueName: \"kubernetes.io/projected/81112a98-d817-4bf4-bb1e-288cb62e8577-kube-api-access-nqfmm\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.965842 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.978662 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Oct 06 21:51:31 crc kubenswrapper[5014]: E1006 21:51:31.979104 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81112a98-d817-4bf4-bb1e-288cb62e8577" containerName="kube-state-metrics" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.979134 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="81112a98-d817-4bf4-bb1e-288cb62e8577" containerName="kube-state-metrics" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.979354 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="81112a98-d817-4bf4-bb1e-288cb62e8577" containerName="kube-state-metrics" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.979992 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.983937 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.984078 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Oct 06 21:51:31 crc kubenswrapper[5014]: I1006 21:51:31.993770 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 06 21:51:32 crc kubenswrapper[5014]: I1006 21:51:32.063937 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75cpj\" (UniqueName: \"kubernetes.io/projected/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-api-access-75cpj\") pod \"kube-state-metrics-0\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " pod="openstack/kube-state-metrics-0" Oct 06 21:51:32 crc kubenswrapper[5014]: I1006 21:51:32.064072 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " pod="openstack/kube-state-metrics-0" Oct 06 21:51:32 crc kubenswrapper[5014]: I1006 21:51:32.064111 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " pod="openstack/kube-state-metrics-0" Oct 06 21:51:32 crc kubenswrapper[5014]: I1006 21:51:32.064168 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: 
\"kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " pod="openstack/kube-state-metrics-0" Oct 06 21:51:32 crc kubenswrapper[5014]: E1006 21:51:32.112851 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 06 21:51:32 crc kubenswrapper[5014]: E1006 21:51:32.120117 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 06 21:51:32 crc kubenswrapper[5014]: E1006 21:51:32.124894 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 06 21:51:32 crc kubenswrapper[5014]: E1006 21:51:32.124958 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="469ba97a-f5a9-4088-905b-6bb38bd12f25" containerName="nova-scheduler-scheduler" Oct 06 21:51:32 crc kubenswrapper[5014]: I1006 21:51:32.165636 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " pod="openstack/kube-state-metrics-0" Oct 06 21:51:32 crc kubenswrapper[5014]: I1006 21:51:32.165702 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75cpj\" (UniqueName: \"kubernetes.io/projected/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-api-access-75cpj\") pod \"kube-state-metrics-0\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " pod="openstack/kube-state-metrics-0" Oct 06 21:51:32 crc kubenswrapper[5014]: I1006 21:51:32.165805 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " pod="openstack/kube-state-metrics-0" Oct 06 21:51:32 crc kubenswrapper[5014]: I1006 21:51:32.165833 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " pod="openstack/kube-state-metrics-0" Oct 06 21:51:32 crc kubenswrapper[5014]: I1006 21:51:32.170960 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: 
\"kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " pod="openstack/kube-state-metrics-0" Oct 06 21:51:32 crc kubenswrapper[5014]: I1006 21:51:32.171235 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " pod="openstack/kube-state-metrics-0" Oct 06 21:51:32 crc kubenswrapper[5014]: I1006 21:51:32.186518 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " pod="openstack/kube-state-metrics-0" Oct 06 21:51:32 crc kubenswrapper[5014]: I1006 21:51:32.194343 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75cpj\" (UniqueName: \"kubernetes.io/projected/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-api-access-75cpj\") pod \"kube-state-metrics-0\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " pod="openstack/kube-state-metrics-0" Oct 06 21:51:32 crc kubenswrapper[5014]: I1006 21:51:32.303265 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 06 21:51:32 crc kubenswrapper[5014]: I1006 21:51:32.759017 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 06 21:51:32 crc kubenswrapper[5014]: I1006 21:51:32.904827 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"24c4cd4c-297a-45d8-ad6f-e24e53736ecc","Type":"ContainerStarted","Data":"fa176ff3396b5dac4958fb54a4067be44d7861bf720692cc71185f34d2a1f4ac"} Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.143973 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.144528 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="ceilometer-central-agent" containerID="cri-o://76240dd94d6f9661e716d26feb123b9bb4a498809ee39ed2e4c4eed642fa31e1" gracePeriod=30 Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.144934 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="proxy-httpd" containerID="cri-o://e5ad15eb0aae365619d30dfc71dabab11ca5a0cc6a4f711c86f56a3713a08fb3" gracePeriod=30 Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.144978 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="ceilometer-notification-agent" containerID="cri-o://c276f25de93b08ef19f70714adc9dc42e6b05adfaa0940fd1d8c4f8855fd0945" gracePeriod=30 Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.144961 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="sg-core" containerID="cri-o://6e7e916ca9e9909b3ea0020a6575030777e32134b63488d9097d2871994c67d4" gracePeriod=30 Oct 06 21:51:33 crc 
kubenswrapper[5014]: I1006 21:51:33.503395 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81112a98-d817-4bf4-bb1e-288cb62e8577" path="/var/lib/kubelet/pods/81112a98-d817-4bf4-bb1e-288cb62e8577/volumes" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.509677 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.709230 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfv25\" (UniqueName: \"kubernetes.io/projected/469ba97a-f5a9-4088-905b-6bb38bd12f25-kube-api-access-gfv25\") pod \"469ba97a-f5a9-4088-905b-6bb38bd12f25\" (UID: \"469ba97a-f5a9-4088-905b-6bb38bd12f25\") " Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.709377 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/469ba97a-f5a9-4088-905b-6bb38bd12f25-combined-ca-bundle\") pod \"469ba97a-f5a9-4088-905b-6bb38bd12f25\" (UID: \"469ba97a-f5a9-4088-905b-6bb38bd12f25\") " Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.709456 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/469ba97a-f5a9-4088-905b-6bb38bd12f25-config-data\") pod \"469ba97a-f5a9-4088-905b-6bb38bd12f25\" (UID: \"469ba97a-f5a9-4088-905b-6bb38bd12f25\") " Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.719371 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/469ba97a-f5a9-4088-905b-6bb38bd12f25-kube-api-access-gfv25" (OuterVolumeSpecName: "kube-api-access-gfv25") pod "469ba97a-f5a9-4088-905b-6bb38bd12f25" (UID: "469ba97a-f5a9-4088-905b-6bb38bd12f25"). InnerVolumeSpecName "kube-api-access-gfv25". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.754760 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/469ba97a-f5a9-4088-905b-6bb38bd12f25-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "469ba97a-f5a9-4088-905b-6bb38bd12f25" (UID: "469ba97a-f5a9-4088-905b-6bb38bd12f25"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.759203 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/469ba97a-f5a9-4088-905b-6bb38bd12f25-config-data" (OuterVolumeSpecName: "config-data") pod "469ba97a-f5a9-4088-905b-6bb38bd12f25" (UID: "469ba97a-f5a9-4088-905b-6bb38bd12f25"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.812530 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfv25\" (UniqueName: \"kubernetes.io/projected/469ba97a-f5a9-4088-905b-6bb38bd12f25-kube-api-access-gfv25\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.812585 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/469ba97a-f5a9-4088-905b-6bb38bd12f25-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.812601 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/469ba97a-f5a9-4088-905b-6bb38bd12f25-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.833766 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.925466 5014 generic.go:334] "Generic (PLEG): container finished" podID="469ba97a-f5a9-4088-905b-6bb38bd12f25" containerID="add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2" exitCode=0 Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.925516 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"469ba97a-f5a9-4088-905b-6bb38bd12f25","Type":"ContainerDied","Data":"add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2"} Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.925970 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"469ba97a-f5a9-4088-905b-6bb38bd12f25","Type":"ContainerDied","Data":"5d825a5f1dbd9363eca528934e6532b9ad9091cc89030889d0a2074bcca96132"} Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.925988 5014 scope.go:117] "RemoveContainer" containerID="add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.925544 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.932138 5014 generic.go:334] "Generic (PLEG): container finished" podID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerID="e5ad15eb0aae365619d30dfc71dabab11ca5a0cc6a4f711c86f56a3713a08fb3" exitCode=0 Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.932173 5014 generic.go:334] "Generic (PLEG): container finished" podID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerID="6e7e916ca9e9909b3ea0020a6575030777e32134b63488d9097d2871994c67d4" exitCode=2 Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.932189 5014 generic.go:334] "Generic (PLEG): container finished" podID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerID="76240dd94d6f9661e716d26feb123b9bb4a498809ee39ed2e4c4eed642fa31e1" exitCode=0 Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.932237 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ee8201de-0efa-4162-9ccf-0ab77a414bab","Type":"ContainerDied","Data":"e5ad15eb0aae365619d30dfc71dabab11ca5a0cc6a4f711c86f56a3713a08fb3"} Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.932264 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ee8201de-0efa-4162-9ccf-0ab77a414bab","Type":"ContainerDied","Data":"6e7e916ca9e9909b3ea0020a6575030777e32134b63488d9097d2871994c67d4"} Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.932278 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ee8201de-0efa-4162-9ccf-0ab77a414bab","Type":"ContainerDied","Data":"76240dd94d6f9661e716d26feb123b9bb4a498809ee39ed2e4c4eed642fa31e1"} Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.936076 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"24c4cd4c-297a-45d8-ad6f-e24e53736ecc","Type":"ContainerStarted","Data":"9baa658cf1475d9f5420e5c02783c38281b57faef5be3f69ab55c29aa8529137"} Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.936402 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.940793 5014 generic.go:334] "Generic (PLEG): container finished" podID="2f141e4f-d87c-41df-bb55-5a0cd723ae0b" containerID="0aa631a9526ed6a862997049466c2018a288111e31e5ce1889371bfd7dabfdf9" exitCode=0 Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.940940 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2f141e4f-d87c-41df-bb55-5a0cd723ae0b","Type":"ContainerDied","Data":"0aa631a9526ed6a862997049466c2018a288111e31e5ce1889371bfd7dabfdf9"} Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.941112 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2f141e4f-d87c-41df-bb55-5a0cd723ae0b","Type":"ContainerDied","Data":"14e2b8bc6094151c4869cd1e561105891b7196be424371d83bc788d2a56ef190"} Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.941296 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.962291 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.615217214 podStartE2EDuration="2.962269576s" podCreationTimestamp="2025-10-06 21:51:31 +0000 UTC" firstStartedPulling="2025-10-06 21:51:32.765138919 +0000 UTC m=+1238.058175653" lastFinishedPulling="2025-10-06 21:51:33.112191281 +0000 UTC m=+1238.405228015" observedRunningTime="2025-10-06 21:51:33.948722519 +0000 UTC m=+1239.241759253" watchObservedRunningTime="2025-10-06 21:51:33.962269576 +0000 UTC m=+1239.255306310" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.970772 5014 scope.go:117] "RemoveContainer" containerID="add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2" Oct 06 21:51:33 crc kubenswrapper[5014]: E1006 21:51:33.971204 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2\": container with ID starting with add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2 not found: ID does not exist" containerID="add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.971239 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2"} err="failed to get container status \"add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2\": rpc error: code = NotFound desc = could not find container \"add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2\": container with ID starting with add99e5fc96d183c112587a75e881f3d7742ef72d7a1e64f8c16891f201e97c2 not found: ID does not exist" Oct 06 21:51:33 crc kubenswrapper[5014]: I1006 21:51:33.971262 5014 scope.go:117] "RemoveContainer" containerID="0aa631a9526ed6a862997049466c2018a288111e31e5ce1889371bfd7dabfdf9" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.013962 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.016242 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-combined-ca-bundle\") pod \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.016369 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-config-data\") pod \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.016505 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-logs\") pod \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.016548 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2wdf\" (UniqueName: \"kubernetes.io/projected/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-kube-api-access-f2wdf\") pod 
\"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\" (UID: \"2f141e4f-d87c-41df-bb55-5a0cd723ae0b\") " Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.019535 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-logs" (OuterVolumeSpecName: "logs") pod "2f141e4f-d87c-41df-bb55-5a0cd723ae0b" (UID: "2f141e4f-d87c-41df-bb55-5a0cd723ae0b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.019985 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.024292 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-kube-api-access-f2wdf" (OuterVolumeSpecName: "kube-api-access-f2wdf") pod "2f141e4f-d87c-41df-bb55-5a0cd723ae0b" (UID: "2f141e4f-d87c-41df-bb55-5a0cd723ae0b"). InnerVolumeSpecName "kube-api-access-f2wdf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.025096 5014 scope.go:117] "RemoveContainer" containerID="5387587dd2e9ecece03a233025f6a291123cee61b607fbfaa8b934939fbda252" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.031923 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.044503 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:51:34 crc kubenswrapper[5014]: E1006 21:51:34.046404 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="469ba97a-f5a9-4088-905b-6bb38bd12f25" containerName="nova-scheduler-scheduler" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.046431 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="469ba97a-f5a9-4088-905b-6bb38bd12f25" containerName="nova-scheduler-scheduler" Oct 06 21:51:34 crc kubenswrapper[5014]: E1006 21:51:34.046472 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f141e4f-d87c-41df-bb55-5a0cd723ae0b" containerName="nova-api-api" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.046484 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f141e4f-d87c-41df-bb55-5a0cd723ae0b" containerName="nova-api-api" Oct 06 21:51:34 crc kubenswrapper[5014]: E1006 21:51:34.046517 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f141e4f-d87c-41df-bb55-5a0cd723ae0b" containerName="nova-api-log" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.046527 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f141e4f-d87c-41df-bb55-5a0cd723ae0b" containerName="nova-api-log" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.046823 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f141e4f-d87c-41df-bb55-5a0cd723ae0b" containerName="nova-api-log" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.046848 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f141e4f-d87c-41df-bb55-5a0cd723ae0b" containerName="nova-api-api" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.046858 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="469ba97a-f5a9-4088-905b-6bb38bd12f25" containerName="nova-scheduler-scheduler" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 
21:51:34.047351 5014 scope.go:117] "RemoveContainer" containerID="0aa631a9526ed6a862997049466c2018a288111e31e5ce1889371bfd7dabfdf9" Oct 06 21:51:34 crc kubenswrapper[5014]: E1006 21:51:34.047906 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0aa631a9526ed6a862997049466c2018a288111e31e5ce1889371bfd7dabfdf9\": container with ID starting with 0aa631a9526ed6a862997049466c2018a288111e31e5ce1889371bfd7dabfdf9 not found: ID does not exist" containerID="0aa631a9526ed6a862997049466c2018a288111e31e5ce1889371bfd7dabfdf9" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.047950 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0aa631a9526ed6a862997049466c2018a288111e31e5ce1889371bfd7dabfdf9"} err="failed to get container status \"0aa631a9526ed6a862997049466c2018a288111e31e5ce1889371bfd7dabfdf9\": rpc error: code = NotFound desc = could not find container \"0aa631a9526ed6a862997049466c2018a288111e31e5ce1889371bfd7dabfdf9\": container with ID starting with 0aa631a9526ed6a862997049466c2018a288111e31e5ce1889371bfd7dabfdf9 not found: ID does not exist" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.047988 5014 scope.go:117] "RemoveContainer" containerID="5387587dd2e9ecece03a233025f6a291123cee61b607fbfaa8b934939fbda252" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.048079 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 06 21:51:34 crc kubenswrapper[5014]: E1006 21:51:34.048678 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5387587dd2e9ecece03a233025f6a291123cee61b607fbfaa8b934939fbda252\": container with ID starting with 5387587dd2e9ecece03a233025f6a291123cee61b607fbfaa8b934939fbda252 not found: ID does not exist" containerID="5387587dd2e9ecece03a233025f6a291123cee61b607fbfaa8b934939fbda252" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.048715 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5387587dd2e9ecece03a233025f6a291123cee61b607fbfaa8b934939fbda252"} err="failed to get container status \"5387587dd2e9ecece03a233025f6a291123cee61b607fbfaa8b934939fbda252\": rpc error: code = NotFound desc = could not find container \"5387587dd2e9ecece03a233025f6a291123cee61b607fbfaa8b934939fbda252\": container with ID starting with 5387587dd2e9ecece03a233025f6a291123cee61b607fbfaa8b934939fbda252 not found: ID does not exist" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.051488 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.052563 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2f141e4f-d87c-41df-bb55-5a0cd723ae0b" (UID: "2f141e4f-d87c-41df-bb55-5a0cd723ae0b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.055637 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.055976 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-config-data" (OuterVolumeSpecName: "config-data") pod "2f141e4f-d87c-41df-bb55-5a0cd723ae0b" (UID: "2f141e4f-d87c-41df-bb55-5a0cd723ae0b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.122916 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lgwm\" (UniqueName: \"kubernetes.io/projected/9887f684-7a28-4281-a5bb-eeff2f94685b-kube-api-access-6lgwm\") pod \"nova-scheduler-0\" (UID: \"9887f684-7a28-4281-a5bb-eeff2f94685b\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.123207 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9887f684-7a28-4281-a5bb-eeff2f94685b-config-data\") pod \"nova-scheduler-0\" (UID: \"9887f684-7a28-4281-a5bb-eeff2f94685b\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.123636 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9887f684-7a28-4281-a5bb-eeff2f94685b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9887f684-7a28-4281-a5bb-eeff2f94685b\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.123712 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f2wdf\" (UniqueName: \"kubernetes.io/projected/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-kube-api-access-f2wdf\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.123733 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.123744 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f141e4f-d87c-41df-bb55-5a0cd723ae0b-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.215114 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.215185 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.225868 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9887f684-7a28-4281-a5bb-eeff2f94685b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9887f684-7a28-4281-a5bb-eeff2f94685b\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.226036 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lgwm\" (UniqueName: 
\"kubernetes.io/projected/9887f684-7a28-4281-a5bb-eeff2f94685b-kube-api-access-6lgwm\") pod \"nova-scheduler-0\" (UID: \"9887f684-7a28-4281-a5bb-eeff2f94685b\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.226092 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9887f684-7a28-4281-a5bb-eeff2f94685b-config-data\") pod \"nova-scheduler-0\" (UID: \"9887f684-7a28-4281-a5bb-eeff2f94685b\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.230415 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9887f684-7a28-4281-a5bb-eeff2f94685b-config-data\") pod \"nova-scheduler-0\" (UID: \"9887f684-7a28-4281-a5bb-eeff2f94685b\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.230935 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9887f684-7a28-4281-a5bb-eeff2f94685b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9887f684-7a28-4281-a5bb-eeff2f94685b\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.247412 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lgwm\" (UniqueName: \"kubernetes.io/projected/9887f684-7a28-4281-a5bb-eeff2f94685b-kube-api-access-6lgwm\") pod \"nova-scheduler-0\" (UID: \"9887f684-7a28-4281-a5bb-eeff2f94685b\") " pod="openstack/nova-scheduler-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.344122 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.355108 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.365976 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.368546 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.371901 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.372201 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.383422 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.530959 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/636d6544-90a7-4c08-9498-2b7d25042ceb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " pod="openstack/nova-api-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.531377 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/636d6544-90a7-4c08-9498-2b7d25042ceb-config-data\") pod \"nova-api-0\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " pod="openstack/nova-api-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.531409 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/636d6544-90a7-4c08-9498-2b7d25042ceb-logs\") pod \"nova-api-0\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " pod="openstack/nova-api-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.531428 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ssd7\" (UniqueName: \"kubernetes.io/projected/636d6544-90a7-4c08-9498-2b7d25042ceb-kube-api-access-9ssd7\") pod \"nova-api-0\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " pod="openstack/nova-api-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.632787 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/636d6544-90a7-4c08-9498-2b7d25042ceb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " pod="openstack/nova-api-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.632862 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/636d6544-90a7-4c08-9498-2b7d25042ceb-config-data\") pod \"nova-api-0\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " pod="openstack/nova-api-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.632943 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/636d6544-90a7-4c08-9498-2b7d25042ceb-logs\") pod \"nova-api-0\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " pod="openstack/nova-api-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.632979 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ssd7\" (UniqueName: \"kubernetes.io/projected/636d6544-90a7-4c08-9498-2b7d25042ceb-kube-api-access-9ssd7\") pod \"nova-api-0\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " pod="openstack/nova-api-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.633453 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/636d6544-90a7-4c08-9498-2b7d25042ceb-logs\") pod \"nova-api-0\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " pod="openstack/nova-api-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.640566 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/636d6544-90a7-4c08-9498-2b7d25042ceb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " pod="openstack/nova-api-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.643212 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/636d6544-90a7-4c08-9498-2b7d25042ceb-config-data\") pod \"nova-api-0\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " pod="openstack/nova-api-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.650606 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ssd7\" (UniqueName: \"kubernetes.io/projected/636d6544-90a7-4c08-9498-2b7d25042ceb-kube-api-access-9ssd7\") pod \"nova-api-0\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " pod="openstack/nova-api-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.689428 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.838905 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:51:34 crc kubenswrapper[5014]: W1006 21:51:34.850060 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9887f684_7a28_4281_a5bb_eeff2f94685b.slice/crio-a3f6fb045e8a90927e32a13ae015011e60668ab54b2f3b2747c2f6547a0120d8 WatchSource:0}: Error finding container a3f6fb045e8a90927e32a13ae015011e60668ab54b2f3b2747c2f6547a0120d8: Status 404 returned error can't find the container with id a3f6fb045e8a90927e32a13ae015011e60668ab54b2f3b2747c2f6547a0120d8 Oct 06 21:51:34 crc kubenswrapper[5014]: I1006 21:51:34.956856 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9887f684-7a28-4281-a5bb-eeff2f94685b","Type":"ContainerStarted","Data":"a3f6fb045e8a90927e32a13ae015011e60668ab54b2f3b2747c2f6547a0120d8"} Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.155394 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.274810 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.516128 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f141e4f-d87c-41df-bb55-5a0cd723ae0b" path="/var/lib/kubelet/pods/2f141e4f-d87c-41df-bb55-5a0cd723ae0b/volumes" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.517143 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="469ba97a-f5a9-4088-905b-6bb38bd12f25" path="/var/lib/kubelet/pods/469ba97a-f5a9-4088-905b-6bb38bd12f25/volumes" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.727558 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.862805 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-config-data\") pod \"ee8201de-0efa-4162-9ccf-0ab77a414bab\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.863403 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5sfpq\" (UniqueName: \"kubernetes.io/projected/ee8201de-0efa-4162-9ccf-0ab77a414bab-kube-api-access-5sfpq\") pod \"ee8201de-0efa-4162-9ccf-0ab77a414bab\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.863546 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-combined-ca-bundle\") pod \"ee8201de-0efa-4162-9ccf-0ab77a414bab\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.863579 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-scripts\") pod \"ee8201de-0efa-4162-9ccf-0ab77a414bab\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.863669 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-sg-core-conf-yaml\") pod \"ee8201de-0efa-4162-9ccf-0ab77a414bab\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.863702 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee8201de-0efa-4162-9ccf-0ab77a414bab-run-httpd\") pod \"ee8201de-0efa-4162-9ccf-0ab77a414bab\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.863800 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee8201de-0efa-4162-9ccf-0ab77a414bab-log-httpd\") pod \"ee8201de-0efa-4162-9ccf-0ab77a414bab\" (UID: \"ee8201de-0efa-4162-9ccf-0ab77a414bab\") " Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.864712 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee8201de-0efa-4162-9ccf-0ab77a414bab-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ee8201de-0efa-4162-9ccf-0ab77a414bab" (UID: "ee8201de-0efa-4162-9ccf-0ab77a414bab"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.866146 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee8201de-0efa-4162-9ccf-0ab77a414bab-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ee8201de-0efa-4162-9ccf-0ab77a414bab" (UID: "ee8201de-0efa-4162-9ccf-0ab77a414bab"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.872086 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-scripts" (OuterVolumeSpecName: "scripts") pod "ee8201de-0efa-4162-9ccf-0ab77a414bab" (UID: "ee8201de-0efa-4162-9ccf-0ab77a414bab"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.874127 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee8201de-0efa-4162-9ccf-0ab77a414bab-kube-api-access-5sfpq" (OuterVolumeSpecName: "kube-api-access-5sfpq") pod "ee8201de-0efa-4162-9ccf-0ab77a414bab" (UID: "ee8201de-0efa-4162-9ccf-0ab77a414bab"). InnerVolumeSpecName "kube-api-access-5sfpq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.910296 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ee8201de-0efa-4162-9ccf-0ab77a414bab" (UID: "ee8201de-0efa-4162-9ccf-0ab77a414bab"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.957730 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee8201de-0efa-4162-9ccf-0ab77a414bab" (UID: "ee8201de-0efa-4162-9ccf-0ab77a414bab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.966250 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.966286 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.966297 5014 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.966307 5014 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee8201de-0efa-4162-9ccf-0ab77a414bab-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.966316 5014 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee8201de-0efa-4162-9ccf-0ab77a414bab-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.966325 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5sfpq\" (UniqueName: \"kubernetes.io/projected/ee8201de-0efa-4162-9ccf-0ab77a414bab-kube-api-access-5sfpq\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.967799 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-scheduler-0" event={"ID":"9887f684-7a28-4281-a5bb-eeff2f94685b","Type":"ContainerStarted","Data":"f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c"} Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.969760 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"636d6544-90a7-4c08-9498-2b7d25042ceb","Type":"ContainerStarted","Data":"fa67180965643bb9ff78ec51f933dd7ec3dc55dd16054c3840ad4f0148bc3a71"} Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.969856 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"636d6544-90a7-4c08-9498-2b7d25042ceb","Type":"ContainerStarted","Data":"3562b1b9cd7e520c2a0faca1204b345de46d675084b62cb15fe88c8ae162dc6e"} Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.969925 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"636d6544-90a7-4c08-9498-2b7d25042ceb","Type":"ContainerStarted","Data":"a01eecd3417a59e813aca32ffaa906f5548a3be4bddf3cb233aa19b807547a62"} Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.984232 5014 generic.go:334] "Generic (PLEG): container finished" podID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerID="c276f25de93b08ef19f70714adc9dc42e6b05adfaa0940fd1d8c4f8855fd0945" exitCode=0 Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.984652 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ee8201de-0efa-4162-9ccf-0ab77a414bab","Type":"ContainerDied","Data":"c276f25de93b08ef19f70714adc9dc42e6b05adfaa0940fd1d8c4f8855fd0945"} Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.984752 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ee8201de-0efa-4162-9ccf-0ab77a414bab","Type":"ContainerDied","Data":"3db34027d3353c0790a5c2e7a1e28d8e46abe94a7ecfb705d807e343e95d0f2d"} Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.984867 5014 scope.go:117] "RemoveContainer" containerID="e5ad15eb0aae365619d30dfc71dabab11ca5a0cc6a4f711c86f56a3713a08fb3" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.985130 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:51:35 crc kubenswrapper[5014]: I1006 21:51:35.985601 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.985580156 podStartE2EDuration="2.985580156s" podCreationTimestamp="2025-10-06 21:51:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:51:35.981014739 +0000 UTC m=+1241.274051473" watchObservedRunningTime="2025-10-06 21:51:35.985580156 +0000 UTC m=+1241.278616880" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.003305 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-config-data" (OuterVolumeSpecName: "config-data") pod "ee8201de-0efa-4162-9ccf-0ab77a414bab" (UID: "ee8201de-0efa-4162-9ccf-0ab77a414bab"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.022174 5014 scope.go:117] "RemoveContainer" containerID="6e7e916ca9e9909b3ea0020a6575030777e32134b63488d9097d2871994c67d4" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.029074 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.029046728 podStartE2EDuration="2.029046728s" podCreationTimestamp="2025-10-06 21:51:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:51:35.997940455 +0000 UTC m=+1241.290977179" watchObservedRunningTime="2025-10-06 21:51:36.029046728 +0000 UTC m=+1241.322083462" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.060296 5014 scope.go:117] "RemoveContainer" containerID="c276f25de93b08ef19f70714adc9dc42e6b05adfaa0940fd1d8c4f8855fd0945" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.067597 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee8201de-0efa-4162-9ccf-0ab77a414bab-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.084728 5014 scope.go:117] "RemoveContainer" containerID="76240dd94d6f9661e716d26feb123b9bb4a498809ee39ed2e4c4eed642fa31e1" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.104830 5014 scope.go:117] "RemoveContainer" containerID="e5ad15eb0aae365619d30dfc71dabab11ca5a0cc6a4f711c86f56a3713a08fb3" Oct 06 21:51:36 crc kubenswrapper[5014]: E1006 21:51:36.109934 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5ad15eb0aae365619d30dfc71dabab11ca5a0cc6a4f711c86f56a3713a08fb3\": container with ID starting with e5ad15eb0aae365619d30dfc71dabab11ca5a0cc6a4f711c86f56a3713a08fb3 not found: ID does not exist" containerID="e5ad15eb0aae365619d30dfc71dabab11ca5a0cc6a4f711c86f56a3713a08fb3" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.109982 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5ad15eb0aae365619d30dfc71dabab11ca5a0cc6a4f711c86f56a3713a08fb3"} err="failed to get container status \"e5ad15eb0aae365619d30dfc71dabab11ca5a0cc6a4f711c86f56a3713a08fb3\": rpc error: code = NotFound desc = could not find container \"e5ad15eb0aae365619d30dfc71dabab11ca5a0cc6a4f711c86f56a3713a08fb3\": container with ID starting with e5ad15eb0aae365619d30dfc71dabab11ca5a0cc6a4f711c86f56a3713a08fb3 not found: ID does not exist" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.110011 5014 scope.go:117] "RemoveContainer" containerID="6e7e916ca9e9909b3ea0020a6575030777e32134b63488d9097d2871994c67d4" Oct 06 21:51:36 crc kubenswrapper[5014]: E1006 21:51:36.110375 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e7e916ca9e9909b3ea0020a6575030777e32134b63488d9097d2871994c67d4\": container with ID starting with 6e7e916ca9e9909b3ea0020a6575030777e32134b63488d9097d2871994c67d4 not found: ID does not exist" containerID="6e7e916ca9e9909b3ea0020a6575030777e32134b63488d9097d2871994c67d4" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.110401 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e7e916ca9e9909b3ea0020a6575030777e32134b63488d9097d2871994c67d4"} err="failed to get container status 
\"6e7e916ca9e9909b3ea0020a6575030777e32134b63488d9097d2871994c67d4\": rpc error: code = NotFound desc = could not find container \"6e7e916ca9e9909b3ea0020a6575030777e32134b63488d9097d2871994c67d4\": container with ID starting with 6e7e916ca9e9909b3ea0020a6575030777e32134b63488d9097d2871994c67d4 not found: ID does not exist" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.110419 5014 scope.go:117] "RemoveContainer" containerID="c276f25de93b08ef19f70714adc9dc42e6b05adfaa0940fd1d8c4f8855fd0945" Oct 06 21:51:36 crc kubenswrapper[5014]: E1006 21:51:36.110841 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c276f25de93b08ef19f70714adc9dc42e6b05adfaa0940fd1d8c4f8855fd0945\": container with ID starting with c276f25de93b08ef19f70714adc9dc42e6b05adfaa0940fd1d8c4f8855fd0945 not found: ID does not exist" containerID="c276f25de93b08ef19f70714adc9dc42e6b05adfaa0940fd1d8c4f8855fd0945" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.110870 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c276f25de93b08ef19f70714adc9dc42e6b05adfaa0940fd1d8c4f8855fd0945"} err="failed to get container status \"c276f25de93b08ef19f70714adc9dc42e6b05adfaa0940fd1d8c4f8855fd0945\": rpc error: code = NotFound desc = could not find container \"c276f25de93b08ef19f70714adc9dc42e6b05adfaa0940fd1d8c4f8855fd0945\": container with ID starting with c276f25de93b08ef19f70714adc9dc42e6b05adfaa0940fd1d8c4f8855fd0945 not found: ID does not exist" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.110888 5014 scope.go:117] "RemoveContainer" containerID="76240dd94d6f9661e716d26feb123b9bb4a498809ee39ed2e4c4eed642fa31e1" Oct 06 21:51:36 crc kubenswrapper[5014]: E1006 21:51:36.111152 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76240dd94d6f9661e716d26feb123b9bb4a498809ee39ed2e4c4eed642fa31e1\": container with ID starting with 76240dd94d6f9661e716d26feb123b9bb4a498809ee39ed2e4c4eed642fa31e1 not found: ID does not exist" containerID="76240dd94d6f9661e716d26feb123b9bb4a498809ee39ed2e4c4eed642fa31e1" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.111188 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76240dd94d6f9661e716d26feb123b9bb4a498809ee39ed2e4c4eed642fa31e1"} err="failed to get container status \"76240dd94d6f9661e716d26feb123b9bb4a498809ee39ed2e4c4eed642fa31e1\": rpc error: code = NotFound desc = could not find container \"76240dd94d6f9661e716d26feb123b9bb4a498809ee39ed2e4c4eed642fa31e1\": container with ID starting with 76240dd94d6f9661e716d26feb123b9bb4a498809ee39ed2e4c4eed642fa31e1 not found: ID does not exist" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.330270 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.350437 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.359600 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:51:36 crc kubenswrapper[5014]: E1006 21:51:36.360096 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="ceilometer-central-agent" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.360121 5014 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="ceilometer-central-agent" Oct 06 21:51:36 crc kubenswrapper[5014]: E1006 21:51:36.360137 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="proxy-httpd" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.360145 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="proxy-httpd" Oct 06 21:51:36 crc kubenswrapper[5014]: E1006 21:51:36.360154 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="ceilometer-notification-agent" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.360163 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="ceilometer-notification-agent" Oct 06 21:51:36 crc kubenswrapper[5014]: E1006 21:51:36.360172 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="sg-core" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.360178 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="sg-core" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.360426 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="ceilometer-central-agent" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.360441 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="sg-core" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.360452 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="ceilometer-notification-agent" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.360468 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" containerName="proxy-httpd" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.362652 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.368451 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.368670 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.368770 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.370780 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.475303 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.475741 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/273b6701-c286-40e4-a196-afeeb355d2c6-log-httpd\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.475778 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-config-data\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.475848 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/273b6701-c286-40e4-a196-afeeb355d2c6-run-httpd\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.475941 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.475987 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.476139 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fdw4\" (UniqueName: \"kubernetes.io/projected/273b6701-c286-40e4-a196-afeeb355d2c6-kube-api-access-5fdw4\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.476180 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-scripts\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.577602 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.577654 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.577723 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fdw4\" (UniqueName: \"kubernetes.io/projected/273b6701-c286-40e4-a196-afeeb355d2c6-kube-api-access-5fdw4\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.577748 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-scripts\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.578265 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.578325 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/273b6701-c286-40e4-a196-afeeb355d2c6-log-httpd\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.578344 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-config-data\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.578365 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/273b6701-c286-40e4-a196-afeeb355d2c6-run-httpd\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.578684 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/273b6701-c286-40e4-a196-afeeb355d2c6-run-httpd\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.578773 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/273b6701-c286-40e4-a196-afeeb355d2c6-log-httpd\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.583556 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-scripts\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.583594 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.583991 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.586694 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-config-data\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.593147 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.604015 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fdw4\" (UniqueName: \"kubernetes.io/projected/273b6701-c286-40e4-a196-afeeb355d2c6-kube-api-access-5fdw4\") pod \"ceilometer-0\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " pod="openstack/ceilometer-0" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.613647 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="81112a98-d817-4bf4-bb1e-288cb62e8577" containerName="kube-state-metrics" probeResult="failure" output="Get \"http://10.217.0.109:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 06 21:51:36 crc kubenswrapper[5014]: I1006 21:51:36.690804 5014 util.go:30] "No sandbox for pod can be found. 
Oct 06 21:51:37 crc kubenswrapper[5014]: I1006 21:51:37.169794 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:51:37 crc kubenswrapper[5014]: W1006 21:51:37.182125 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod273b6701_c286_40e4_a196_afeeb355d2c6.slice/crio-9e796e0751fa99ffee0c6f14a431836a440b7f4eeaab8c8545e350f889001913 WatchSource:0}: Error finding container 9e796e0751fa99ffee0c6f14a431836a440b7f4eeaab8c8545e350f889001913: Status 404 returned error can't find the container with id 9e796e0751fa99ffee0c6f14a431836a440b7f4eeaab8c8545e350f889001913 Oct 06 21:51:37 crc kubenswrapper[5014]: I1006 21:51:37.497356 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee8201de-0efa-4162-9ccf-0ab77a414bab" path="/var/lib/kubelet/pods/ee8201de-0efa-4162-9ccf-0ab77a414bab/volumes" Oct 06 21:51:38 crc kubenswrapper[5014]: I1006 21:51:38.016378 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"273b6701-c286-40e4-a196-afeeb355d2c6","Type":"ContainerStarted","Data":"8d55974c7fdd583786942eeefacf769c11072d6ae5090f636456ccfb886e3e7e"} Oct 06 21:51:38 crc kubenswrapper[5014]: I1006 21:51:38.016423 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"273b6701-c286-40e4-a196-afeeb355d2c6","Type":"ContainerStarted","Data":"9e796e0751fa99ffee0c6f14a431836a440b7f4eeaab8c8545e350f889001913"} Oct 06 21:51:39 crc kubenswrapper[5014]: I1006 21:51:39.029131 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"273b6701-c286-40e4-a196-afeeb355d2c6","Type":"ContainerStarted","Data":"f91f21080e6520d62691d55b06ca97bfce56efe2c4125fe1c953f209f272cb78"} Oct 06 21:51:39 crc kubenswrapper[5014]: I1006 21:51:39.215191 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Oct 06 21:51:39 crc kubenswrapper[5014]: I1006 21:51:39.215246 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Oct 06 21:51:39 crc kubenswrapper[5014]: I1006 21:51:39.373183 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Oct 06 21:51:40 crc kubenswrapper[5014]: I1006 21:51:40.061058 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"273b6701-c286-40e4-a196-afeeb355d2c6","Type":"ContainerStarted","Data":"76352c59e26f38fa0d1d9f829f881db38d21ec7add963422e92a37307c9ab76c"} Oct 06 21:51:40 crc kubenswrapper[5014]: I1006 21:51:40.232906 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="8585acea-0fdc-4307-b39c-f98d2d50f03b" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.192:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 06 21:51:40 crc kubenswrapper[5014]: I1006 21:51:40.232928 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="8585acea-0fdc-4307-b39c-f98d2d50f03b" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.192:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 06 21:51:42 crc kubenswrapper[5014]: I1006 21:51:42.091521 5014 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/ceilometer-0" event={"ID":"273b6701-c286-40e4-a196-afeeb355d2c6","Type":"ContainerStarted","Data":"c5185590ebaa718c87d8f54160d423a936fcb6f280ca61cf6a84f1bc8b26a8d9"} Oct 06 21:51:42 crc kubenswrapper[5014]: I1006 21:51:42.092521 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 06 21:51:42 crc kubenswrapper[5014]: I1006 21:51:42.136865 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.484422222 podStartE2EDuration="6.136838991s" podCreationTimestamp="2025-10-06 21:51:36 +0000 UTC" firstStartedPulling="2025-10-06 21:51:37.187018962 +0000 UTC m=+1242.480055696" lastFinishedPulling="2025-10-06 21:51:40.839435721 +0000 UTC m=+1246.132472465" observedRunningTime="2025-10-06 21:51:42.127579383 +0000 UTC m=+1247.420616157" watchObservedRunningTime="2025-10-06 21:51:42.136838991 +0000 UTC m=+1247.429875755" Oct 06 21:51:42 crc kubenswrapper[5014]: I1006 21:51:42.319263 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Oct 06 21:51:44 crc kubenswrapper[5014]: I1006 21:51:44.373590 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Oct 06 21:51:44 crc kubenswrapper[5014]: I1006 21:51:44.422257 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Oct 06 21:51:44 crc kubenswrapper[5014]: I1006 21:51:44.690438 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 06 21:51:44 crc kubenswrapper[5014]: I1006 21:51:44.690502 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 06 21:51:45 crc kubenswrapper[5014]: I1006 21:51:45.158906 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Oct 06 21:51:45 crc kubenswrapper[5014]: I1006 21:51:45.774853 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="636d6544-90a7-4c08-9498-2b7d25042ceb" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.196:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 06 21:51:45 crc kubenswrapper[5014]: I1006 21:51:45.775139 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="636d6544-90a7-4c08-9498-2b7d25042ceb" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.196:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 06 21:51:49 crc kubenswrapper[5014]: I1006 21:51:49.223443 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 06 21:51:49 crc kubenswrapper[5014]: I1006 21:51:49.227721 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 06 21:51:49 crc kubenswrapper[5014]: I1006 21:51:49.232711 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 06 21:51:50 crc kubenswrapper[5014]: I1006 21:51:50.205872 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.142112 5014 util.go:48] "No ready sandbox for pod can be found. 
Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.224942 5014 generic.go:334] "Generic (PLEG): container finished" podID="a8735a7a-5287-4c79-ac48-8d03dc88b146" containerID="1656c7d6e0b91ae4a8c5e642556f1c2602908fcbd49f5364d3850728954bf952" exitCode=137 Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.225018 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a8735a7a-5287-4c79-ac48-8d03dc88b146","Type":"ContainerDied","Data":"1656c7d6e0b91ae4a8c5e642556f1c2602908fcbd49f5364d3850728954bf952"} Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.225054 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.225094 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a8735a7a-5287-4c79-ac48-8d03dc88b146","Type":"ContainerDied","Data":"496e79c38dbbc237833cb6febcccdec6c48847ea5dc133725bcfda55f09dd54c"} Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.225120 5014 scope.go:117] "RemoveContainer" containerID="1656c7d6e0b91ae4a8c5e642556f1c2602908fcbd49f5364d3850728954bf952" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.236992 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8735a7a-5287-4c79-ac48-8d03dc88b146-combined-ca-bundle\") pod \"a8735a7a-5287-4c79-ac48-8d03dc88b146\" (UID: \"a8735a7a-5287-4c79-ac48-8d03dc88b146\") " Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.237248 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8735a7a-5287-4c79-ac48-8d03dc88b146-config-data\") pod \"a8735a7a-5287-4c79-ac48-8d03dc88b146\" (UID: \"a8735a7a-5287-4c79-ac48-8d03dc88b146\") " Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.237358 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9wd2f\" (UniqueName: \"kubernetes.io/projected/a8735a7a-5287-4c79-ac48-8d03dc88b146-kube-api-access-9wd2f\") pod \"a8735a7a-5287-4c79-ac48-8d03dc88b146\" (UID: \"a8735a7a-5287-4c79-ac48-8d03dc88b146\") " Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.251173 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8735a7a-5287-4c79-ac48-8d03dc88b146-kube-api-access-9wd2f" (OuterVolumeSpecName: "kube-api-access-9wd2f") pod "a8735a7a-5287-4c79-ac48-8d03dc88b146" (UID: "a8735a7a-5287-4c79-ac48-8d03dc88b146"). InnerVolumeSpecName "kube-api-access-9wd2f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.260520 5014 scope.go:117] "RemoveContainer" containerID="1656c7d6e0b91ae4a8c5e642556f1c2602908fcbd49f5364d3850728954bf952" Oct 06 21:51:52 crc kubenswrapper[5014]: E1006 21:51:52.261201 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1656c7d6e0b91ae4a8c5e642556f1c2602908fcbd49f5364d3850728954bf952\": container with ID starting with 1656c7d6e0b91ae4a8c5e642556f1c2602908fcbd49f5364d3850728954bf952 not found: ID does not exist" containerID="1656c7d6e0b91ae4a8c5e642556f1c2602908fcbd49f5364d3850728954bf952" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.261267 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1656c7d6e0b91ae4a8c5e642556f1c2602908fcbd49f5364d3850728954bf952"} err="failed to get container status \"1656c7d6e0b91ae4a8c5e642556f1c2602908fcbd49f5364d3850728954bf952\": rpc error: code = NotFound desc = could not find container \"1656c7d6e0b91ae4a8c5e642556f1c2602908fcbd49f5364d3850728954bf952\": container with ID starting with 1656c7d6e0b91ae4a8c5e642556f1c2602908fcbd49f5364d3850728954bf952 not found: ID does not exist" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.265928 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8735a7a-5287-4c79-ac48-8d03dc88b146-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a8735a7a-5287-4c79-ac48-8d03dc88b146" (UID: "a8735a7a-5287-4c79-ac48-8d03dc88b146"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.273483 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8735a7a-5287-4c79-ac48-8d03dc88b146-config-data" (OuterVolumeSpecName: "config-data") pod "a8735a7a-5287-4c79-ac48-8d03dc88b146" (UID: "a8735a7a-5287-4c79-ac48-8d03dc88b146"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.340777 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8735a7a-5287-4c79-ac48-8d03dc88b146-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.340835 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8735a7a-5287-4c79-ac48-8d03dc88b146-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.340855 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9wd2f\" (UniqueName: \"kubernetes.io/projected/a8735a7a-5287-4c79-ac48-8d03dc88b146-kube-api-access-9wd2f\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.583571 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.595390 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.629745 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 06 21:51:52 crc kubenswrapper[5014]: E1006 21:51:52.630770 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8735a7a-5287-4c79-ac48-8d03dc88b146" containerName="nova-cell1-novncproxy-novncproxy" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.630812 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8735a7a-5287-4c79-ac48-8d03dc88b146" containerName="nova-cell1-novncproxy-novncproxy" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.631298 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8735a7a-5287-4c79-ac48-8d03dc88b146" containerName="nova-cell1-novncproxy-novncproxy" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.632813 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.636764 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.642694 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.646583 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.646645 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.750454 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.750529 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.750790 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.750848 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zvv8\" (UniqueName: \"kubernetes.io/projected/49c2ecb8-63d7-4275-97ff-7aa899707212-kube-api-access-7zvv8\") pod \"nova-cell1-novncproxy-0\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.751137 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.852823 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.852949 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " 
pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.852991 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.853103 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.853146 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zvv8\" (UniqueName: \"kubernetes.io/projected/49c2ecb8-63d7-4275-97ff-7aa899707212-kube-api-access-7zvv8\") pod \"nova-cell1-novncproxy-0\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.861281 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.862066 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.862516 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.870010 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.879290 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zvv8\" (UniqueName: \"kubernetes.io/projected/49c2ecb8-63d7-4275-97ff-7aa899707212-kube-api-access-7zvv8\") pod \"nova-cell1-novncproxy-0\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:52 crc kubenswrapper[5014]: I1006 21:51:52.960272 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:53 crc kubenswrapper[5014]: I1006 21:51:53.502143 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8735a7a-5287-4c79-ac48-8d03dc88b146" path="/var/lib/kubelet/pods/a8735a7a-5287-4c79-ac48-8d03dc88b146/volumes" Oct 06 21:51:53 crc kubenswrapper[5014]: I1006 21:51:53.505571 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 06 21:51:53 crc kubenswrapper[5014]: W1006 21:51:53.511764 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49c2ecb8_63d7_4275_97ff_7aa899707212.slice/crio-33fac91b5175fea94fcd5a738d6784dfa9f469df0acc66711d0dca96a16b2645 WatchSource:0}: Error finding container 33fac91b5175fea94fcd5a738d6784dfa9f469df0acc66711d0dca96a16b2645: Status 404 returned error can't find the container with id 33fac91b5175fea94fcd5a738d6784dfa9f469df0acc66711d0dca96a16b2645 Oct 06 21:51:54 crc kubenswrapper[5014]: I1006 21:51:54.258811 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"49c2ecb8-63d7-4275-97ff-7aa899707212","Type":"ContainerStarted","Data":"a6b7e7b2fd6d1afbe67e44ec51642bedfaa579e1f6be92a11c811ecf98a44b29"} Oct 06 21:51:54 crc kubenswrapper[5014]: I1006 21:51:54.259288 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"49c2ecb8-63d7-4275-97ff-7aa899707212","Type":"ContainerStarted","Data":"33fac91b5175fea94fcd5a738d6784dfa9f469df0acc66711d0dca96a16b2645"} Oct 06 21:51:54 crc kubenswrapper[5014]: I1006 21:51:54.294383 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.294354304 podStartE2EDuration="2.294354304s" podCreationTimestamp="2025-10-06 21:51:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:51:54.285015893 +0000 UTC m=+1259.578052627" watchObservedRunningTime="2025-10-06 21:51:54.294354304 +0000 UTC m=+1259.587391068" Oct 06 21:51:54 crc kubenswrapper[5014]: I1006 21:51:54.694322 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 06 21:51:54 crc kubenswrapper[5014]: I1006 21:51:54.696702 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 06 21:51:54 crc kubenswrapper[5014]: I1006 21:51:54.697021 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 06 21:51:54 crc kubenswrapper[5014]: I1006 21:51:54.700610 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.265938 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.268958 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.451779 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5967cc9597-h6t4m"] Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.550291 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.550680 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwwxv\" (UniqueName: \"kubernetes.io/projected/ff29a0b3-1307-4fdb-bead-68d87f2f2923-kube-api-access-lwwxv\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.550738 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-dns-swift-storage-0\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.551143 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-dns-svc\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.551255 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-ovsdbserver-sb\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.551681 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-ovsdbserver-nb\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.551721 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-config\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.585592 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5967cc9597-h6t4m"] Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.655422 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-ovsdbserver-nb\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.655478 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-config\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.656075 5014 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-lwwxv\" (UniqueName: \"kubernetes.io/projected/ff29a0b3-1307-4fdb-bead-68d87f2f2923-kube-api-access-lwwxv\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.656276 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-dns-swift-storage-0\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.656358 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-dns-svc\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.656610 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-ovsdbserver-sb\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.657085 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-dns-swift-storage-0\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.657819 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-ovsdbserver-nb\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.658733 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-ovsdbserver-sb\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.660604 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-config\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.660745 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-dns-svc\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.679177 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwwxv\" (UniqueName: 
\"kubernetes.io/projected/ff29a0b3-1307-4fdb-bead-68d87f2f2923-kube-api-access-lwwxv\") pod \"dnsmasq-dns-5967cc9597-h6t4m\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:55 crc kubenswrapper[5014]: I1006 21:51:55.885786 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:56 crc kubenswrapper[5014]: W1006 21:51:56.390557 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff29a0b3_1307_4fdb_bead_68d87f2f2923.slice/crio-f81ffb74c2f7606ba2f55e7dcc2cd016020251f5c77642d89ac281c6cdb007f5 WatchSource:0}: Error finding container f81ffb74c2f7606ba2f55e7dcc2cd016020251f5c77642d89ac281c6cdb007f5: Status 404 returned error can't find the container with id f81ffb74c2f7606ba2f55e7dcc2cd016020251f5c77642d89ac281c6cdb007f5 Oct 06 21:51:56 crc kubenswrapper[5014]: I1006 21:51:56.392007 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5967cc9597-h6t4m"] Oct 06 21:51:57 crc kubenswrapper[5014]: I1006 21:51:57.288548 5014 generic.go:334] "Generic (PLEG): container finished" podID="ff29a0b3-1307-4fdb-bead-68d87f2f2923" containerID="8a37ab91c905eddb6ad7d2e3f88dac93cf2150ba561314df29c56fe058ecc2e8" exitCode=0 Oct 06 21:51:57 crc kubenswrapper[5014]: I1006 21:51:57.288657 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" event={"ID":"ff29a0b3-1307-4fdb-bead-68d87f2f2923","Type":"ContainerDied","Data":"8a37ab91c905eddb6ad7d2e3f88dac93cf2150ba561314df29c56fe058ecc2e8"} Oct 06 21:51:57 crc kubenswrapper[5014]: I1006 21:51:57.289578 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" event={"ID":"ff29a0b3-1307-4fdb-bead-68d87f2f2923","Type":"ContainerStarted","Data":"f81ffb74c2f7606ba2f55e7dcc2cd016020251f5c77642d89ac281c6cdb007f5"} Oct 06 21:51:57 crc kubenswrapper[5014]: I1006 21:51:57.513989 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:51:57 crc kubenswrapper[5014]: I1006 21:51:57.515039 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="ceilometer-central-agent" containerID="cri-o://8d55974c7fdd583786942eeefacf769c11072d6ae5090f636456ccfb886e3e7e" gracePeriod=30 Oct 06 21:51:57 crc kubenswrapper[5014]: I1006 21:51:57.515237 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="sg-core" containerID="cri-o://76352c59e26f38fa0d1d9f829f881db38d21ec7add963422e92a37307c9ab76c" gracePeriod=30 Oct 06 21:51:57 crc kubenswrapper[5014]: I1006 21:51:57.515329 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="ceilometer-notification-agent" containerID="cri-o://f91f21080e6520d62691d55b06ca97bfce56efe2c4125fe1c953f209f272cb78" gracePeriod=30 Oct 06 21:51:57 crc kubenswrapper[5014]: I1006 21:51:57.515361 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="proxy-httpd" containerID="cri-o://c5185590ebaa718c87d8f54160d423a936fcb6f280ca61cf6a84f1bc8b26a8d9" gracePeriod=30 Oct 06 21:51:57 
crc kubenswrapper[5014]: I1006 21:51:57.529336 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.197:3000/\": read tcp 10.217.0.2:40482->10.217.0.197:3000: read: connection reset by peer" Oct 06 21:51:57 crc kubenswrapper[5014]: I1006 21:51:57.961368 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:51:58 crc kubenswrapper[5014]: I1006 21:51:58.254275 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:51:58 crc kubenswrapper[5014]: I1006 21:51:58.306022 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" event={"ID":"ff29a0b3-1307-4fdb-bead-68d87f2f2923","Type":"ContainerStarted","Data":"17be5865767feac1ef6592a1850f5a4352b1c7d266e2950f2d1dbfb81a9f252e"} Oct 06 21:51:58 crc kubenswrapper[5014]: I1006 21:51:58.308894 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:51:58 crc kubenswrapper[5014]: I1006 21:51:58.316461 5014 generic.go:334] "Generic (PLEG): container finished" podID="273b6701-c286-40e4-a196-afeeb355d2c6" containerID="c5185590ebaa718c87d8f54160d423a936fcb6f280ca61cf6a84f1bc8b26a8d9" exitCode=0 Oct 06 21:51:58 crc kubenswrapper[5014]: I1006 21:51:58.316516 5014 generic.go:334] "Generic (PLEG): container finished" podID="273b6701-c286-40e4-a196-afeeb355d2c6" containerID="76352c59e26f38fa0d1d9f829f881db38d21ec7add963422e92a37307c9ab76c" exitCode=2 Oct 06 21:51:58 crc kubenswrapper[5014]: I1006 21:51:58.316535 5014 generic.go:334] "Generic (PLEG): container finished" podID="273b6701-c286-40e4-a196-afeeb355d2c6" containerID="8d55974c7fdd583786942eeefacf769c11072d6ae5090f636456ccfb886e3e7e" exitCode=0 Oct 06 21:51:58 crc kubenswrapper[5014]: I1006 21:51:58.316714 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"273b6701-c286-40e4-a196-afeeb355d2c6","Type":"ContainerDied","Data":"c5185590ebaa718c87d8f54160d423a936fcb6f280ca61cf6a84f1bc8b26a8d9"} Oct 06 21:51:58 crc kubenswrapper[5014]: I1006 21:51:58.316790 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"273b6701-c286-40e4-a196-afeeb355d2c6","Type":"ContainerDied","Data":"76352c59e26f38fa0d1d9f829f881db38d21ec7add963422e92a37307c9ab76c"} Oct 06 21:51:58 crc kubenswrapper[5014]: I1006 21:51:58.316803 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"273b6701-c286-40e4-a196-afeeb355d2c6","Type":"ContainerDied","Data":"8d55974c7fdd583786942eeefacf769c11072d6ae5090f636456ccfb886e3e7e"} Oct 06 21:51:58 crc kubenswrapper[5014]: I1006 21:51:58.316897 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="636d6544-90a7-4c08-9498-2b7d25042ceb" containerName="nova-api-log" containerID="cri-o://3562b1b9cd7e520c2a0faca1204b345de46d675084b62cb15fe88c8ae162dc6e" gracePeriod=30 Oct 06 21:51:58 crc kubenswrapper[5014]: I1006 21:51:58.316959 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="636d6544-90a7-4c08-9498-2b7d25042ceb" containerName="nova-api-api" containerID="cri-o://fa67180965643bb9ff78ec51f933dd7ec3dc55dd16054c3840ad4f0148bc3a71" gracePeriod=30 Oct 06 21:51:58 crc kubenswrapper[5014]: I1006 
21:51:58.346050 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" podStartSLOduration=3.346014057 podStartE2EDuration="3.346014057s" podCreationTimestamp="2025-10-06 21:51:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:51:58.342557716 +0000 UTC m=+1263.635594460" watchObservedRunningTime="2025-10-06 21:51:58.346014057 +0000 UTC m=+1263.639050791" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.332523 5014 generic.go:334] "Generic (PLEG): container finished" podID="273b6701-c286-40e4-a196-afeeb355d2c6" containerID="f91f21080e6520d62691d55b06ca97bfce56efe2c4125fe1c953f209f272cb78" exitCode=0 Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.332996 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"273b6701-c286-40e4-a196-afeeb355d2c6","Type":"ContainerDied","Data":"f91f21080e6520d62691d55b06ca97bfce56efe2c4125fe1c953f209f272cb78"} Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.335942 5014 generic.go:334] "Generic (PLEG): container finished" podID="636d6544-90a7-4c08-9498-2b7d25042ceb" containerID="3562b1b9cd7e520c2a0faca1204b345de46d675084b62cb15fe88c8ae162dc6e" exitCode=143 Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.336874 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"636d6544-90a7-4c08-9498-2b7d25042ceb","Type":"ContainerDied","Data":"3562b1b9cd7e520c2a0faca1204b345de46d675084b62cb15fe88c8ae162dc6e"} Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.554193 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.641794 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-combined-ca-bundle\") pod \"273b6701-c286-40e4-a196-afeeb355d2c6\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.641897 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-scripts\") pod \"273b6701-c286-40e4-a196-afeeb355d2c6\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.641956 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/273b6701-c286-40e4-a196-afeeb355d2c6-log-httpd\") pod \"273b6701-c286-40e4-a196-afeeb355d2c6\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.641985 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/273b6701-c286-40e4-a196-afeeb355d2c6-run-httpd\") pod \"273b6701-c286-40e4-a196-afeeb355d2c6\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.642076 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-ceilometer-tls-certs\") pod \"273b6701-c286-40e4-a196-afeeb355d2c6\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") "
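Exit codes in the ContainerDied events above follow the 128+signal convention: 143 = 128+15 (SIGTERM, a clean stop within the 30-second grace period requested earlier) and 137 = 128+9 (SIGKILL, as when a grace period expires); sg-core's exitCode=2 is the process's own status, not a signal. A tiny decoder sketch:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Exit codes above 128 encode the fatal signal as code-128.
	for _, code := range []int{143, 137} {
		sig := syscall.Signal(code - 128)
		fmt.Printf("exit %d -> signal %d (%v)\n", code, code-128, sig)
	}
}
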
Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.642151 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-sg-core-conf-yaml\") pod \"273b6701-c286-40e4-a196-afeeb355d2c6\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.642190 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5fdw4\" (UniqueName: \"kubernetes.io/projected/273b6701-c286-40e4-a196-afeeb355d2c6-kube-api-access-5fdw4\") pod \"273b6701-c286-40e4-a196-afeeb355d2c6\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.642244 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-config-data\") pod \"273b6701-c286-40e4-a196-afeeb355d2c6\" (UID: \"273b6701-c286-40e4-a196-afeeb355d2c6\") " Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.642586 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/273b6701-c286-40e4-a196-afeeb355d2c6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "273b6701-c286-40e4-a196-afeeb355d2c6" (UID: "273b6701-c286-40e4-a196-afeeb355d2c6"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.642969 5014 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/273b6701-c286-40e4-a196-afeeb355d2c6-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.644870 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/273b6701-c286-40e4-a196-afeeb355d2c6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "273b6701-c286-40e4-a196-afeeb355d2c6" (UID: "273b6701-c286-40e4-a196-afeeb355d2c6"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.649283 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-scripts" (OuterVolumeSpecName: "scripts") pod "273b6701-c286-40e4-a196-afeeb355d2c6" (UID: "273b6701-c286-40e4-a196-afeeb355d2c6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.649989 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/273b6701-c286-40e4-a196-afeeb355d2c6-kube-api-access-5fdw4" (OuterVolumeSpecName: "kube-api-access-5fdw4") pod "273b6701-c286-40e4-a196-afeeb355d2c6" (UID: "273b6701-c286-40e4-a196-afeeb355d2c6"). InnerVolumeSpecName "kube-api-access-5fdw4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.718943 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "273b6701-c286-40e4-a196-afeeb355d2c6" (UID: "273b6701-c286-40e4-a196-afeeb355d2c6"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.745092 5014 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.745134 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5fdw4\" (UniqueName: \"kubernetes.io/projected/273b6701-c286-40e4-a196-afeeb355d2c6-kube-api-access-5fdw4\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.745143 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.745151 5014 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/273b6701-c286-40e4-a196-afeeb355d2c6-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.774522 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "273b6701-c286-40e4-a196-afeeb355d2c6" (UID: "273b6701-c286-40e4-a196-afeeb355d2c6"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.796551 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "273b6701-c286-40e4-a196-afeeb355d2c6" (UID: "273b6701-c286-40e4-a196-afeeb355d2c6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.796935 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-config-data" (OuterVolumeSpecName: "config-data") pod "273b6701-c286-40e4-a196-afeeb355d2c6" (UID: "273b6701-c286-40e4-a196-afeeb355d2c6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.847680 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.848076 5014 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:51:59 crc kubenswrapper[5014]: I1006 21:51:59.848086 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/273b6701-c286-40e4-a196-afeeb355d2c6-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.353558 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"273b6701-c286-40e4-a196-afeeb355d2c6","Type":"ContainerDied","Data":"9e796e0751fa99ffee0c6f14a431836a440b7f4eeaab8c8545e350f889001913"} Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.353688 5014 scope.go:117] "RemoveContainer" containerID="c5185590ebaa718c87d8f54160d423a936fcb6f280ca61cf6a84f1bc8b26a8d9" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.354008 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.385781 5014 scope.go:117] "RemoveContainer" containerID="76352c59e26f38fa0d1d9f829f881db38d21ec7add963422e92a37307c9ab76c" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.419675 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.421336 5014 scope.go:117] "RemoveContainer" containerID="f91f21080e6520d62691d55b06ca97bfce56efe2c4125fe1c953f209f272cb78" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.431518 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.447141 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:52:00 crc kubenswrapper[5014]: E1006 21:52:00.447651 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="proxy-httpd" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.447669 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="proxy-httpd" Oct 06 21:52:00 crc kubenswrapper[5014]: E1006 21:52:00.447693 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="ceilometer-notification-agent" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.447702 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="ceilometer-notification-agent" Oct 06 21:52:00 crc kubenswrapper[5014]: E1006 21:52:00.447728 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="ceilometer-central-agent" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.447738 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="ceilometer-central-agent" Oct 06 21:52:00 
crc kubenswrapper[5014]: E1006 21:52:00.447765 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="sg-core" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.447773 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="sg-core" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.447993 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="sg-core" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.448008 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="ceilometer-central-agent" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.448034 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="proxy-httpd" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.448057 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" containerName="ceilometer-notification-agent" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.451501 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.456237 5014 scope.go:117] "RemoveContainer" containerID="8d55974c7fdd583786942eeefacf769c11072d6ae5090f636456ccfb886e3e7e" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.456714 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.456779 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.457038 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.470070 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.560916 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6rmb\" (UniqueName: \"kubernetes.io/projected/77557f72-0ba6-4b60-a286-e17bc63d9ce6-kube-api-access-g6rmb\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.560961 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.561008 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-config-data\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.561039 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-scripts\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.561070 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77557f72-0ba6-4b60-a286-e17bc63d9ce6-run-httpd\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.561085 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.561111 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77557f72-0ba6-4b60-a286-e17bc63d9ce6-log-httpd\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.561143 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.663391 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.663503 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6rmb\" (UniqueName: \"kubernetes.io/projected/77557f72-0ba6-4b60-a286-e17bc63d9ce6-kube-api-access-g6rmb\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.663534 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.663581 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-config-data\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.664630 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-scripts\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.664721 5014 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77557f72-0ba6-4b60-a286-e17bc63d9ce6-run-httpd\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.664744 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.664791 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77557f72-0ba6-4b60-a286-e17bc63d9ce6-log-httpd\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.665207 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77557f72-0ba6-4b60-a286-e17bc63d9ce6-run-httpd\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.665472 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77557f72-0ba6-4b60-a286-e17bc63d9ce6-log-httpd\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.669235 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-scripts\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.669543 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-config-data\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.670354 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.680459 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.682087 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.683339 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6rmb\" (UniqueName: 
\"kubernetes.io/projected/77557f72-0ba6-4b60-a286-e17bc63d9ce6-kube-api-access-g6rmb\") pod \"ceilometer-0\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.783117 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:52:00 crc kubenswrapper[5014]: I1006 21:52:00.875808 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:52:01 crc kubenswrapper[5014]: I1006 21:52:01.256911 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:52:01 crc kubenswrapper[5014]: W1006 21:52:01.264686 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77557f72_0ba6_4b60_a286_e17bc63d9ce6.slice/crio-82e52758c43cb8afc27ae762fca7b247e9cd96b0ab89dc2d2072e5b84bfc2a73 WatchSource:0}: Error finding container 82e52758c43cb8afc27ae762fca7b247e9cd96b0ab89dc2d2072e5b84bfc2a73: Status 404 returned error can't find the container with id 82e52758c43cb8afc27ae762fca7b247e9cd96b0ab89dc2d2072e5b84bfc2a73 Oct 06 21:52:01 crc kubenswrapper[5014]: I1006 21:52:01.371290 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77557f72-0ba6-4b60-a286-e17bc63d9ce6","Type":"ContainerStarted","Data":"82e52758c43cb8afc27ae762fca7b247e9cd96b0ab89dc2d2072e5b84bfc2a73"} Oct 06 21:52:01 crc kubenswrapper[5014]: I1006 21:52:01.502527 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="273b6701-c286-40e4-a196-afeeb355d2c6" path="/var/lib/kubelet/pods/273b6701-c286-40e4-a196-afeeb355d2c6/volumes" Oct 06 21:52:01 crc kubenswrapper[5014]: I1006 21:52:01.952381 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.103349 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ssd7\" (UniqueName: \"kubernetes.io/projected/636d6544-90a7-4c08-9498-2b7d25042ceb-kube-api-access-9ssd7\") pod \"636d6544-90a7-4c08-9498-2b7d25042ceb\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.103514 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/636d6544-90a7-4c08-9498-2b7d25042ceb-logs\") pod \"636d6544-90a7-4c08-9498-2b7d25042ceb\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.103843 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/636d6544-90a7-4c08-9498-2b7d25042ceb-config-data\") pod \"636d6544-90a7-4c08-9498-2b7d25042ceb\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.104114 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/636d6544-90a7-4c08-9498-2b7d25042ceb-combined-ca-bundle\") pod \"636d6544-90a7-4c08-9498-2b7d25042ceb\" (UID: \"636d6544-90a7-4c08-9498-2b7d25042ceb\") " Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.104251 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/636d6544-90a7-4c08-9498-2b7d25042ceb-logs" (OuterVolumeSpecName: "logs") pod "636d6544-90a7-4c08-9498-2b7d25042ceb" (UID: "636d6544-90a7-4c08-9498-2b7d25042ceb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.104893 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/636d6544-90a7-4c08-9498-2b7d25042ceb-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.114978 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/636d6544-90a7-4c08-9498-2b7d25042ceb-kube-api-access-9ssd7" (OuterVolumeSpecName: "kube-api-access-9ssd7") pod "636d6544-90a7-4c08-9498-2b7d25042ceb" (UID: "636d6544-90a7-4c08-9498-2b7d25042ceb"). InnerVolumeSpecName "kube-api-access-9ssd7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.142367 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/636d6544-90a7-4c08-9498-2b7d25042ceb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "636d6544-90a7-4c08-9498-2b7d25042ceb" (UID: "636d6544-90a7-4c08-9498-2b7d25042ceb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.159115 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/636d6544-90a7-4c08-9498-2b7d25042ceb-config-data" (OuterVolumeSpecName: "config-data") pod "636d6544-90a7-4c08-9498-2b7d25042ceb" (UID: "636d6544-90a7-4c08-9498-2b7d25042ceb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.207042 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ssd7\" (UniqueName: \"kubernetes.io/projected/636d6544-90a7-4c08-9498-2b7d25042ceb-kube-api-access-9ssd7\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.207083 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/636d6544-90a7-4c08-9498-2b7d25042ceb-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.207095 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/636d6544-90a7-4c08-9498-2b7d25042ceb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.382293 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77557f72-0ba6-4b60-a286-e17bc63d9ce6","Type":"ContainerStarted","Data":"96b985b4ce05ae94a2bf6a1333a823f53b186d827434e59bacfe1fc81fa542d9"} Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.384677 5014 generic.go:334] "Generic (PLEG): container finished" podID="636d6544-90a7-4c08-9498-2b7d25042ceb" containerID="fa67180965643bb9ff78ec51f933dd7ec3dc55dd16054c3840ad4f0148bc3a71" exitCode=0 Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.384790 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.385016 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"636d6544-90a7-4c08-9498-2b7d25042ceb","Type":"ContainerDied","Data":"fa67180965643bb9ff78ec51f933dd7ec3dc55dd16054c3840ad4f0148bc3a71"} Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.385138 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"636d6544-90a7-4c08-9498-2b7d25042ceb","Type":"ContainerDied","Data":"a01eecd3417a59e813aca32ffaa906f5548a3be4bddf3cb233aa19b807547a62"} Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.385225 5014 scope.go:117] "RemoveContainer" containerID="fa67180965643bb9ff78ec51f933dd7ec3dc55dd16054c3840ad4f0148bc3a71" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.415764 5014 scope.go:117] "RemoveContainer" containerID="3562b1b9cd7e520c2a0faca1204b345de46d675084b62cb15fe88c8ae162dc6e" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.437474 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.450719 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.461943 5014 scope.go:117] "RemoveContainer" containerID="fa67180965643bb9ff78ec51f933dd7ec3dc55dd16054c3840ad4f0148bc3a71" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.463896 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Oct 06 21:52:02 crc kubenswrapper[5014]: E1006 21:52:02.464586 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="636d6544-90a7-4c08-9498-2b7d25042ceb" containerName="nova-api-api" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.464652 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="636d6544-90a7-4c08-9498-2b7d25042ceb" containerName="nova-api-api" Oct 06 
21:52:02 crc kubenswrapper[5014]: E1006 21:52:02.464708 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="636d6544-90a7-4c08-9498-2b7d25042ceb" containerName="nova-api-log" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.464723 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="636d6544-90a7-4c08-9498-2b7d25042ceb" containerName="nova-api-log" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.465060 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="636d6544-90a7-4c08-9498-2b7d25042ceb" containerName="nova-api-api" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.465095 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="636d6544-90a7-4c08-9498-2b7d25042ceb" containerName="nova-api-log" Oct 06 21:52:02 crc kubenswrapper[5014]: E1006 21:52:02.465120 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa67180965643bb9ff78ec51f933dd7ec3dc55dd16054c3840ad4f0148bc3a71\": container with ID starting with fa67180965643bb9ff78ec51f933dd7ec3dc55dd16054c3840ad4f0148bc3a71 not found: ID does not exist" containerID="fa67180965643bb9ff78ec51f933dd7ec3dc55dd16054c3840ad4f0148bc3a71" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.465165 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa67180965643bb9ff78ec51f933dd7ec3dc55dd16054c3840ad4f0148bc3a71"} err="failed to get container status \"fa67180965643bb9ff78ec51f933dd7ec3dc55dd16054c3840ad4f0148bc3a71\": rpc error: code = NotFound desc = could not find container \"fa67180965643bb9ff78ec51f933dd7ec3dc55dd16054c3840ad4f0148bc3a71\": container with ID starting with fa67180965643bb9ff78ec51f933dd7ec3dc55dd16054c3840ad4f0148bc3a71 not found: ID does not exist" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.465195 5014 scope.go:117] "RemoveContainer" containerID="3562b1b9cd7e520c2a0faca1204b345de46d675084b62cb15fe88c8ae162dc6e" Oct 06 21:52:02 crc kubenswrapper[5014]: E1006 21:52:02.465522 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3562b1b9cd7e520c2a0faca1204b345de46d675084b62cb15fe88c8ae162dc6e\": container with ID starting with 3562b1b9cd7e520c2a0faca1204b345de46d675084b62cb15fe88c8ae162dc6e not found: ID does not exist" containerID="3562b1b9cd7e520c2a0faca1204b345de46d675084b62cb15fe88c8ae162dc6e" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.465545 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3562b1b9cd7e520c2a0faca1204b345de46d675084b62cb15fe88c8ae162dc6e"} err="failed to get container status \"3562b1b9cd7e520c2a0faca1204b345de46d675084b62cb15fe88c8ae162dc6e\": rpc error: code = NotFound desc = could not find container \"3562b1b9cd7e520c2a0faca1204b345de46d675084b62cb15fe88c8ae162dc6e\": container with ID starting with 3562b1b9cd7e520c2a0faca1204b345de46d675084b62cb15fe88c8ae162dc6e not found: ID does not exist" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.466702 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.471672 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.471707 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.471932 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.473560 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.615507 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-public-tls-certs\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.615612 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlxkl\" (UniqueName: \"kubernetes.io/projected/97ca4b1b-5ee1-4adf-be50-3c7101840e66-kube-api-access-vlxkl\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.615689 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-internal-tls-certs\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.615783 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-config-data\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.615829 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97ca4b1b-5ee1-4adf-be50-3c7101840e66-logs\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.615883 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.717612 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.717954 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-public-tls-certs\") pod 
\"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.717996 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlxkl\" (UniqueName: \"kubernetes.io/projected/97ca4b1b-5ee1-4adf-be50-3c7101840e66-kube-api-access-vlxkl\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.718024 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-internal-tls-certs\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.718101 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-config-data\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.718141 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97ca4b1b-5ee1-4adf-be50-3c7101840e66-logs\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.718500 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97ca4b1b-5ee1-4adf-be50-3c7101840e66-logs\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.722217 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-config-data\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.722750 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.725377 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-public-tls-certs\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.727442 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-internal-tls-certs\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.742207 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlxkl\" (UniqueName: \"kubernetes.io/projected/97ca4b1b-5ee1-4adf-be50-3c7101840e66-kube-api-access-vlxkl\") pod \"nova-api-0\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " 
pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.794759 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:52:02 crc kubenswrapper[5014]: I1006 21:52:02.960776 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:52:03 crc kubenswrapper[5014]: I1006 21:52:03.011733 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:52:03 crc kubenswrapper[5014]: I1006 21:52:03.323596 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:52:03 crc kubenswrapper[5014]: I1006 21:52:03.415670 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"97ca4b1b-5ee1-4adf-be50-3c7101840e66","Type":"ContainerStarted","Data":"4da6c336351568efe00a6c438328748c0c6aea8c1b99a56af86c12f8d79e287f"} Oct 06 21:52:03 crc kubenswrapper[5014]: I1006 21:52:03.420650 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77557f72-0ba6-4b60-a286-e17bc63d9ce6","Type":"ContainerStarted","Data":"b0866dca99a5dfd61f1204eb15d2fa4707e29e8cb965bfcba38c587a96caa925"} Oct 06 21:52:03 crc kubenswrapper[5014]: I1006 21:52:03.453469 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:52:03 crc kubenswrapper[5014]: I1006 21:52:03.510051 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="636d6544-90a7-4c08-9498-2b7d25042ceb" path="/var/lib/kubelet/pods/636d6544-90a7-4c08-9498-2b7d25042ceb/volumes" Oct 06 21:52:03 crc kubenswrapper[5014]: I1006 21:52:03.803678 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-2qbdf"] Oct 06 21:52:03 crc kubenswrapper[5014]: I1006 21:52:03.805199 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:03 crc kubenswrapper[5014]: I1006 21:52:03.810084 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Oct 06 21:52:03 crc kubenswrapper[5014]: I1006 21:52:03.810117 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Oct 06 21:52:03 crc kubenswrapper[5014]: I1006 21:52:03.813335 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-2qbdf"] Oct 06 21:52:03 crc kubenswrapper[5014]: I1006 21:52:03.958189 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-config-data\") pod \"nova-cell1-cell-mapping-2qbdf\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:03 crc kubenswrapper[5014]: I1006 21:52:03.958368 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7zn4\" (UniqueName: \"kubernetes.io/projected/388c3436-9432-48f2-ab10-6741959aebf5-kube-api-access-s7zn4\") pod \"nova-cell1-cell-mapping-2qbdf\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:03 crc kubenswrapper[5014]: I1006 21:52:03.958604 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-scripts\") pod \"nova-cell1-cell-mapping-2qbdf\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:03 crc kubenswrapper[5014]: I1006 21:52:03.958686 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-2qbdf\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:04 crc kubenswrapper[5014]: I1006 21:52:04.060727 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-scripts\") pod \"nova-cell1-cell-mapping-2qbdf\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:04 crc kubenswrapper[5014]: I1006 21:52:04.060778 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-2qbdf\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:04 crc kubenswrapper[5014]: I1006 21:52:04.060850 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-config-data\") pod \"nova-cell1-cell-mapping-2qbdf\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:04 crc kubenswrapper[5014]: I1006 21:52:04.060884 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7zn4\" (UniqueName: 
\"kubernetes.io/projected/388c3436-9432-48f2-ab10-6741959aebf5-kube-api-access-s7zn4\") pod \"nova-cell1-cell-mapping-2qbdf\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:04 crc kubenswrapper[5014]: I1006 21:52:04.065972 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-config-data\") pod \"nova-cell1-cell-mapping-2qbdf\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:04 crc kubenswrapper[5014]: I1006 21:52:04.066040 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-2qbdf\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:04 crc kubenswrapper[5014]: I1006 21:52:04.073129 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-scripts\") pod \"nova-cell1-cell-mapping-2qbdf\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:04 crc kubenswrapper[5014]: I1006 21:52:04.078480 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7zn4\" (UniqueName: \"kubernetes.io/projected/388c3436-9432-48f2-ab10-6741959aebf5-kube-api-access-s7zn4\") pod \"nova-cell1-cell-mapping-2qbdf\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:04 crc kubenswrapper[5014]: I1006 21:52:04.139252 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:04 crc kubenswrapper[5014]: I1006 21:52:04.438206 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"97ca4b1b-5ee1-4adf-be50-3c7101840e66","Type":"ContainerStarted","Data":"b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e"} Oct 06 21:52:04 crc kubenswrapper[5014]: I1006 21:52:04.438905 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"97ca4b1b-5ee1-4adf-be50-3c7101840e66","Type":"ContainerStarted","Data":"2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002"} Oct 06 21:52:04 crc kubenswrapper[5014]: I1006 21:52:04.451941 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77557f72-0ba6-4b60-a286-e17bc63d9ce6","Type":"ContainerStarted","Data":"e6a645510b6ca4a358f976b73e22cad321588d5a2db9221460a8709535276d78"} Oct 06 21:52:04 crc kubenswrapper[5014]: I1006 21:52:04.461518 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.461496288 podStartE2EDuration="2.461496288s" podCreationTimestamp="2025-10-06 21:52:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:52:04.454165072 +0000 UTC m=+1269.747201826" watchObservedRunningTime="2025-10-06 21:52:04.461496288 +0000 UTC m=+1269.754533022" Oct 06 21:52:04 crc kubenswrapper[5014]: I1006 21:52:04.639958 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-2qbdf"] Oct 06 21:52:04 crc kubenswrapper[5014]: W1006 21:52:04.644299 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod388c3436_9432_48f2_ab10_6741959aebf5.slice/crio-341d9ac916662f18f219eb33e003adc2c1bca6f64a31565ff5e211cce6a1f2bb WatchSource:0}: Error finding container 341d9ac916662f18f219eb33e003adc2c1bca6f64a31565ff5e211cce6a1f2bb: Status 404 returned error can't find the container with id 341d9ac916662f18f219eb33e003adc2c1bca6f64a31565ff5e211cce6a1f2bb Oct 06 21:52:05 crc kubenswrapper[5014]: I1006 21:52:05.461751 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-2qbdf" event={"ID":"388c3436-9432-48f2-ab10-6741959aebf5","Type":"ContainerStarted","Data":"7e310f1e94472fbc3a79b40ea198846da76836cf2835e1612a43c414454648ad"} Oct 06 21:52:05 crc kubenswrapper[5014]: I1006 21:52:05.462247 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-2qbdf" event={"ID":"388c3436-9432-48f2-ab10-6741959aebf5","Type":"ContainerStarted","Data":"341d9ac916662f18f219eb33e003adc2c1bca6f64a31565ff5e211cce6a1f2bb"} Oct 06 21:52:05 crc kubenswrapper[5014]: I1006 21:52:05.465871 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77557f72-0ba6-4b60-a286-e17bc63d9ce6","Type":"ContainerStarted","Data":"883270dae602a8ddce5dcae21e3f094986d01b7ae3fdd7840c45caf576eee915"} Oct 06 21:52:05 crc kubenswrapper[5014]: I1006 21:52:05.466297 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="proxy-httpd" containerID="cri-o://883270dae602a8ddce5dcae21e3f094986d01b7ae3fdd7840c45caf576eee915" gracePeriod=30 Oct 06 21:52:05 crc kubenswrapper[5014]: I1006 
21:52:05.466309 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="ceilometer-central-agent" containerID="cri-o://96b985b4ce05ae94a2bf6a1333a823f53b186d827434e59bacfe1fc81fa542d9" gracePeriod=30 Oct 06 21:52:05 crc kubenswrapper[5014]: I1006 21:52:05.466320 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="sg-core" containerID="cri-o://e6a645510b6ca4a358f976b73e22cad321588d5a2db9221460a8709535276d78" gracePeriod=30 Oct 06 21:52:05 crc kubenswrapper[5014]: I1006 21:52:05.466367 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="ceilometer-notification-agent" containerID="cri-o://b0866dca99a5dfd61f1204eb15d2fa4707e29e8cb965bfcba38c587a96caa925" gracePeriod=30 Oct 06 21:52:05 crc kubenswrapper[5014]: I1006 21:52:05.489200 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-2qbdf" podStartSLOduration=2.4891712 podStartE2EDuration="2.4891712s" podCreationTimestamp="2025-10-06 21:52:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:52:05.485095829 +0000 UTC m=+1270.778132563" watchObservedRunningTime="2025-10-06 21:52:05.4891712 +0000 UTC m=+1270.782207974" Oct 06 21:52:05 crc kubenswrapper[5014]: I1006 21:52:05.534563 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.844104368 podStartE2EDuration="5.534543583s" podCreationTimestamp="2025-10-06 21:52:00 +0000 UTC" firstStartedPulling="2025-10-06 21:52:01.268783665 +0000 UTC m=+1266.561820439" lastFinishedPulling="2025-10-06 21:52:04.9592229 +0000 UTC m=+1270.252259654" observedRunningTime="2025-10-06 21:52:05.526427112 +0000 UTC m=+1270.819463846" watchObservedRunningTime="2025-10-06 21:52:05.534543583 +0000 UTC m=+1270.827580317" Oct 06 21:52:05 crc kubenswrapper[5014]: I1006 21:52:05.888015 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:52:05 crc kubenswrapper[5014]: I1006 21:52:05.976413 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64d8d96789-x75b8"] Oct 06 21:52:05 crc kubenswrapper[5014]: I1006 21:52:05.976741 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-64d8d96789-x75b8" podUID="486ed6a2-9dc8-49de-8601-09042e42e5b1" containerName="dnsmasq-dns" containerID="cri-o://2c01c113ed0c4688ed51a7ea51e16ece0de9c6f03bc70c0f7a9b90f07fcbcac6" gracePeriod=10 Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.481932 5014 generic.go:334] "Generic (PLEG): container finished" podID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerID="883270dae602a8ddce5dcae21e3f094986d01b7ae3fdd7840c45caf576eee915" exitCode=0 Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.484789 5014 generic.go:334] "Generic (PLEG): container finished" podID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerID="e6a645510b6ca4a358f976b73e22cad321588d5a2db9221460a8709535276d78" exitCode=2 Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.484877 5014 generic.go:334] "Generic (PLEG): container finished" 
podID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerID="b0866dca99a5dfd61f1204eb15d2fa4707e29e8cb965bfcba38c587a96caa925" exitCode=0 Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.484761 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77557f72-0ba6-4b60-a286-e17bc63d9ce6","Type":"ContainerDied","Data":"883270dae602a8ddce5dcae21e3f094986d01b7ae3fdd7840c45caf576eee915"} Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.485129 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77557f72-0ba6-4b60-a286-e17bc63d9ce6","Type":"ContainerDied","Data":"e6a645510b6ca4a358f976b73e22cad321588d5a2db9221460a8709535276d78"} Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.485193 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77557f72-0ba6-4b60-a286-e17bc63d9ce6","Type":"ContainerDied","Data":"b0866dca99a5dfd61f1204eb15d2fa4707e29e8cb965bfcba38c587a96caa925"} Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.489050 5014 generic.go:334] "Generic (PLEG): container finished" podID="486ed6a2-9dc8-49de-8601-09042e42e5b1" containerID="2c01c113ed0c4688ed51a7ea51e16ece0de9c6f03bc70c0f7a9b90f07fcbcac6" exitCode=0 Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.489204 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64d8d96789-x75b8" event={"ID":"486ed6a2-9dc8-49de-8601-09042e42e5b1","Type":"ContainerDied","Data":"2c01c113ed0c4688ed51a7ea51e16ece0de9c6f03bc70c0f7a9b90f07fcbcac6"} Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.489298 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64d8d96789-x75b8" event={"ID":"486ed6a2-9dc8-49de-8601-09042e42e5b1","Type":"ContainerDied","Data":"11bd2ee53cde7311f83040a920d1edf02d0c41157fdf02d4c2d80b246f2d3d32"} Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.489377 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="11bd2ee53cde7311f83040a920d1edf02d0c41157fdf02d4c2d80b246f2d3d32" Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.507014 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.619975 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-ovsdbserver-sb\") pod \"486ed6a2-9dc8-49de-8601-09042e42e5b1\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.620055 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-ovsdbserver-nb\") pod \"486ed6a2-9dc8-49de-8601-09042e42e5b1\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.620080 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-dns-svc\") pod \"486ed6a2-9dc8-49de-8601-09042e42e5b1\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.620134 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-dns-swift-storage-0\") pod \"486ed6a2-9dc8-49de-8601-09042e42e5b1\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.620189 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-config\") pod \"486ed6a2-9dc8-49de-8601-09042e42e5b1\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.620245 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4fsbn\" (UniqueName: \"kubernetes.io/projected/486ed6a2-9dc8-49de-8601-09042e42e5b1-kube-api-access-4fsbn\") pod \"486ed6a2-9dc8-49de-8601-09042e42e5b1\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.631922 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/486ed6a2-9dc8-49de-8601-09042e42e5b1-kube-api-access-4fsbn" (OuterVolumeSpecName: "kube-api-access-4fsbn") pod "486ed6a2-9dc8-49de-8601-09042e42e5b1" (UID: "486ed6a2-9dc8-49de-8601-09042e42e5b1"). InnerVolumeSpecName "kube-api-access-4fsbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.690122 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "486ed6a2-9dc8-49de-8601-09042e42e5b1" (UID: "486ed6a2-9dc8-49de-8601-09042e42e5b1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.696524 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "486ed6a2-9dc8-49de-8601-09042e42e5b1" (UID: "486ed6a2-9dc8-49de-8601-09042e42e5b1"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.697735 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-config" (OuterVolumeSpecName: "config") pod "486ed6a2-9dc8-49de-8601-09042e42e5b1" (UID: "486ed6a2-9dc8-49de-8601-09042e42e5b1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:52:06 crc kubenswrapper[5014]: E1006 21:52:06.712653 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-dns-swift-storage-0 podName:486ed6a2-9dc8-49de-8601-09042e42e5b1 nodeName:}" failed. No retries permitted until 2025-10-06 21:52:07.212600025 +0000 UTC m=+1272.505636779 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "dns-swift-storage-0" (UniqueName: "kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-dns-swift-storage-0") pod "486ed6a2-9dc8-49de-8601-09042e42e5b1" (UID: "486ed6a2-9dc8-49de-8601-09042e42e5b1") : error deleting /var/lib/kubelet/pods/486ed6a2-9dc8-49de-8601-09042e42e5b1/volume-subpaths: remove /var/lib/kubelet/pods/486ed6a2-9dc8-49de-8601-09042e42e5b1/volume-subpaths: no such file or directory Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.713246 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "486ed6a2-9dc8-49de-8601-09042e42e5b1" (UID: "486ed6a2-9dc8-49de-8601-09042e42e5b1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.723313 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4fsbn\" (UniqueName: \"kubernetes.io/projected/486ed6a2-9dc8-49de-8601-09042e42e5b1-kube-api-access-4fsbn\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.723347 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.723364 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.723379 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:06 crc kubenswrapper[5014]: I1006 21:52:06.723391 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:07 crc kubenswrapper[5014]: I1006 21:52:07.231309 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-dns-swift-storage-0\") pod \"486ed6a2-9dc8-49de-8601-09042e42e5b1\" (UID: \"486ed6a2-9dc8-49de-8601-09042e42e5b1\") " Oct 06 21:52:07 crc kubenswrapper[5014]: I1006 21:52:07.231826 5014 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "486ed6a2-9dc8-49de-8601-09042e42e5b1" (UID: "486ed6a2-9dc8-49de-8601-09042e42e5b1"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:52:07 crc kubenswrapper[5014]: I1006 21:52:07.232245 5014 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/486ed6a2-9dc8-49de-8601-09042e42e5b1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:07 crc kubenswrapper[5014]: I1006 21:52:07.503309 5014 generic.go:334] "Generic (PLEG): container finished" podID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerID="96b985b4ce05ae94a2bf6a1333a823f53b186d827434e59bacfe1fc81fa542d9" exitCode=0 Oct 06 21:52:07 crc kubenswrapper[5014]: I1006 21:52:07.503365 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77557f72-0ba6-4b60-a286-e17bc63d9ce6","Type":"ContainerDied","Data":"96b985b4ce05ae94a2bf6a1333a823f53b186d827434e59bacfe1fc81fa542d9"} Oct 06 21:52:07 crc kubenswrapper[5014]: I1006 21:52:07.503439 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64d8d96789-x75b8" Oct 06 21:52:07 crc kubenswrapper[5014]: I1006 21:52:07.603341 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64d8d96789-x75b8"] Oct 06 21:52:07 crc kubenswrapper[5014]: I1006 21:52:07.611199 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-64d8d96789-x75b8"] Oct 06 21:52:07 crc kubenswrapper[5014]: I1006 21:52:07.891733 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.046227 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-sg-core-conf-yaml\") pod \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.046278 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-combined-ca-bundle\") pod \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.046309 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6rmb\" (UniqueName: \"kubernetes.io/projected/77557f72-0ba6-4b60-a286-e17bc63d9ce6-kube-api-access-g6rmb\") pod \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.046345 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-scripts\") pod \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.046479 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-config-data\") pod \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.046512 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-ceilometer-tls-certs\") pod \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.046562 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77557f72-0ba6-4b60-a286-e17bc63d9ce6-run-httpd\") pod \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.046765 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77557f72-0ba6-4b60-a286-e17bc63d9ce6-log-httpd\") pod \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\" (UID: \"77557f72-0ba6-4b60-a286-e17bc63d9ce6\") " Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.047500 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77557f72-0ba6-4b60-a286-e17bc63d9ce6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "77557f72-0ba6-4b60-a286-e17bc63d9ce6" (UID: "77557f72-0ba6-4b60-a286-e17bc63d9ce6"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.047774 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77557f72-0ba6-4b60-a286-e17bc63d9ce6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "77557f72-0ba6-4b60-a286-e17bc63d9ce6" (UID: "77557f72-0ba6-4b60-a286-e17bc63d9ce6"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.053521 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77557f72-0ba6-4b60-a286-e17bc63d9ce6-kube-api-access-g6rmb" (OuterVolumeSpecName: "kube-api-access-g6rmb") pod "77557f72-0ba6-4b60-a286-e17bc63d9ce6" (UID: "77557f72-0ba6-4b60-a286-e17bc63d9ce6"). InnerVolumeSpecName "kube-api-access-g6rmb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.053765 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-scripts" (OuterVolumeSpecName: "scripts") pod "77557f72-0ba6-4b60-a286-e17bc63d9ce6" (UID: "77557f72-0ba6-4b60-a286-e17bc63d9ce6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.089531 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "77557f72-0ba6-4b60-a286-e17bc63d9ce6" (UID: "77557f72-0ba6-4b60-a286-e17bc63d9ce6"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.116578 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "77557f72-0ba6-4b60-a286-e17bc63d9ce6" (UID: "77557f72-0ba6-4b60-a286-e17bc63d9ce6"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.141952 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "77557f72-0ba6-4b60-a286-e17bc63d9ce6" (UID: "77557f72-0ba6-4b60-a286-e17bc63d9ce6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.149546 5014 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77557f72-0ba6-4b60-a286-e17bc63d9ce6-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.149600 5014 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.149628 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.149647 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6rmb\" (UniqueName: \"kubernetes.io/projected/77557f72-0ba6-4b60-a286-e17bc63d9ce6-kube-api-access-g6rmb\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.149659 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.149669 5014 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.149681 5014 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/77557f72-0ba6-4b60-a286-e17bc63d9ce6-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.181766 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-config-data" (OuterVolumeSpecName: "config-data") pod "77557f72-0ba6-4b60-a286-e17bc63d9ce6" (UID: "77557f72-0ba6-4b60-a286-e17bc63d9ce6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.251994 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77557f72-0ba6-4b60-a286-e17bc63d9ce6-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.518585 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"77557f72-0ba6-4b60-a286-e17bc63d9ce6","Type":"ContainerDied","Data":"82e52758c43cb8afc27ae762fca7b247e9cd96b0ab89dc2d2072e5b84bfc2a73"} Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.519089 5014 scope.go:117] "RemoveContainer" containerID="883270dae602a8ddce5dcae21e3f094986d01b7ae3fdd7840c45caf576eee915" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.519316 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.612865 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.613939 5014 scope.go:117] "RemoveContainer" containerID="e6a645510b6ca4a358f976b73e22cad321588d5a2db9221460a8709535276d78" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.635905 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.646501 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:52:08 crc kubenswrapper[5014]: E1006 21:52:08.647159 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="486ed6a2-9dc8-49de-8601-09042e42e5b1" containerName="dnsmasq-dns" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.647181 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="486ed6a2-9dc8-49de-8601-09042e42e5b1" containerName="dnsmasq-dns" Oct 06 21:52:08 crc kubenswrapper[5014]: E1006 21:52:08.647217 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="486ed6a2-9dc8-49de-8601-09042e42e5b1" containerName="init" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.647224 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="486ed6a2-9dc8-49de-8601-09042e42e5b1" containerName="init" Oct 06 21:52:08 crc kubenswrapper[5014]: E1006 21:52:08.647236 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="ceilometer-central-agent" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.647245 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="ceilometer-central-agent" Oct 06 21:52:08 crc kubenswrapper[5014]: E1006 21:52:08.647261 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="sg-core" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.647268 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="sg-core" Oct 06 21:52:08 crc kubenswrapper[5014]: E1006 21:52:08.647291 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="ceilometer-notification-agent" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.647299 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="ceilometer-notification-agent" Oct 06 21:52:08 crc kubenswrapper[5014]: E1006 21:52:08.647309 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="proxy-httpd" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.647317 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="proxy-httpd" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.653901 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="ceilometer-notification-agent" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.653938 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="proxy-httpd" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.653956 5014 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="ceilometer-central-agent" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.653971 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="486ed6a2-9dc8-49de-8601-09042e42e5b1" containerName="dnsmasq-dns" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.653987 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" containerName="sg-core" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.656184 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.659774 5014 scope.go:117] "RemoveContainer" containerID="b0866dca99a5dfd61f1204eb15d2fa4707e29e8cb965bfcba38c587a96caa925" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.660248 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.660296 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.660598 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.665815 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.703852 5014 scope.go:117] "RemoveContainer" containerID="96b985b4ce05ae94a2bf6a1333a823f53b186d827434e59bacfe1fc81fa542d9" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.762324 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/32e89ff3-0d60-4fd1-9c0d-831b92311165-run-httpd\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.762376 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-scripts\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.762422 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.762581 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-config-data\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.762609 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 
06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.762652 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4q6b2\" (UniqueName: \"kubernetes.io/projected/32e89ff3-0d60-4fd1-9c0d-831b92311165-kube-api-access-4q6b2\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.762722 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/32e89ff3-0d60-4fd1-9c0d-831b92311165-log-httpd\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.762796 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.863953 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-config-data\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.864011 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.864039 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4q6b2\" (UniqueName: \"kubernetes.io/projected/32e89ff3-0d60-4fd1-9c0d-831b92311165-kube-api-access-4q6b2\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.864069 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/32e89ff3-0d60-4fd1-9c0d-831b92311165-log-httpd\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.864122 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.864159 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/32e89ff3-0d60-4fd1-9c0d-831b92311165-run-httpd\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.864191 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-scripts\") pod \"ceilometer-0\" (UID: 
\"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.864237 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.864877 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/32e89ff3-0d60-4fd1-9c0d-831b92311165-run-httpd\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.864961 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/32e89ff3-0d60-4fd1-9c0d-831b92311165-log-httpd\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.869823 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.871069 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-config-data\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.871182 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-scripts\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.884279 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.892191 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.896677 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4q6b2\" (UniqueName: \"kubernetes.io/projected/32e89ff3-0d60-4fd1-9c0d-831b92311165-kube-api-access-4q6b2\") pod \"ceilometer-0\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " pod="openstack/ceilometer-0" Oct 06 21:52:08 crc kubenswrapper[5014]: I1006 21:52:08.989490 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:52:09 crc kubenswrapper[5014]: I1006 21:52:09.468296 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:52:09 crc kubenswrapper[5014]: I1006 21:52:09.509167 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="486ed6a2-9dc8-49de-8601-09042e42e5b1" path="/var/lib/kubelet/pods/486ed6a2-9dc8-49de-8601-09042e42e5b1/volumes" Oct 06 21:52:09 crc kubenswrapper[5014]: I1006 21:52:09.511140 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77557f72-0ba6-4b60-a286-e17bc63d9ce6" path="/var/lib/kubelet/pods/77557f72-0ba6-4b60-a286-e17bc63d9ce6/volumes" Oct 06 21:52:09 crc kubenswrapper[5014]: I1006 21:52:09.542099 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"32e89ff3-0d60-4fd1-9c0d-831b92311165","Type":"ContainerStarted","Data":"cef1aa613c35b40230a30282b50b0b6a417cd2dc5f522cbf08e2b5a696a5278b"} Oct 06 21:52:10 crc kubenswrapper[5014]: I1006 21:52:10.565127 5014 generic.go:334] "Generic (PLEG): container finished" podID="388c3436-9432-48f2-ab10-6741959aebf5" containerID="7e310f1e94472fbc3a79b40ea198846da76836cf2835e1612a43c414454648ad" exitCode=0 Oct 06 21:52:10 crc kubenswrapper[5014]: I1006 21:52:10.565223 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-2qbdf" event={"ID":"388c3436-9432-48f2-ab10-6741959aebf5","Type":"ContainerDied","Data":"7e310f1e94472fbc3a79b40ea198846da76836cf2835e1612a43c414454648ad"} Oct 06 21:52:10 crc kubenswrapper[5014]: I1006 21:52:10.568413 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"32e89ff3-0d60-4fd1-9c0d-831b92311165","Type":"ContainerStarted","Data":"1e488f802cc8da011f91e05b3c478b47334580c1a8c26408bcadc4e55ec4cd81"} Oct 06 21:52:11 crc kubenswrapper[5014]: I1006 21:52:11.581905 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"32e89ff3-0d60-4fd1-9c0d-831b92311165","Type":"ContainerStarted","Data":"d23dd50b826bbf964b1ff4d5d05a5097816f29157fbba7ced1c970a4eec16c3b"} Oct 06 21:52:11 crc kubenswrapper[5014]: I1006 21:52:11.957813 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.041809 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7zn4\" (UniqueName: \"kubernetes.io/projected/388c3436-9432-48f2-ab10-6741959aebf5-kube-api-access-s7zn4\") pod \"388c3436-9432-48f2-ab10-6741959aebf5\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.042035 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-scripts\") pod \"388c3436-9432-48f2-ab10-6741959aebf5\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.042089 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-config-data\") pod \"388c3436-9432-48f2-ab10-6741959aebf5\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.042171 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-combined-ca-bundle\") pod \"388c3436-9432-48f2-ab10-6741959aebf5\" (UID: \"388c3436-9432-48f2-ab10-6741959aebf5\") " Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.050014 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/388c3436-9432-48f2-ab10-6741959aebf5-kube-api-access-s7zn4" (OuterVolumeSpecName: "kube-api-access-s7zn4") pod "388c3436-9432-48f2-ab10-6741959aebf5" (UID: "388c3436-9432-48f2-ab10-6741959aebf5"). InnerVolumeSpecName "kube-api-access-s7zn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.053250 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-scripts" (OuterVolumeSpecName: "scripts") pod "388c3436-9432-48f2-ab10-6741959aebf5" (UID: "388c3436-9432-48f2-ab10-6741959aebf5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.081708 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-config-data" (OuterVolumeSpecName: "config-data") pod "388c3436-9432-48f2-ab10-6741959aebf5" (UID: "388c3436-9432-48f2-ab10-6741959aebf5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.085940 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "388c3436-9432-48f2-ab10-6741959aebf5" (UID: "388c3436-9432-48f2-ab10-6741959aebf5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.144462 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.144501 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7zn4\" (UniqueName: \"kubernetes.io/projected/388c3436-9432-48f2-ab10-6741959aebf5-kube-api-access-s7zn4\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.144515 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.144525 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/388c3436-9432-48f2-ab10-6741959aebf5-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.591895 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-2qbdf" event={"ID":"388c3436-9432-48f2-ab10-6741959aebf5","Type":"ContainerDied","Data":"341d9ac916662f18f219eb33e003adc2c1bca6f64a31565ff5e211cce6a1f2bb"} Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.591946 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="341d9ac916662f18f219eb33e003adc2c1bca6f64a31565ff5e211cce6a1f2bb" Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.592009 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-2qbdf" Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.600504 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"32e89ff3-0d60-4fd1-9c0d-831b92311165","Type":"ContainerStarted","Data":"ec20b9f41983e4b8a0ea0f9349995c36994eedc43877f52f349e3a2298e48326"} Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.771535 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.771814 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="97ca4b1b-5ee1-4adf-be50-3c7101840e66" containerName="nova-api-log" containerID="cri-o://2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002" gracePeriod=30 Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.771904 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="97ca4b1b-5ee1-4adf-be50-3c7101840e66" containerName="nova-api-api" containerID="cri-o://b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e" gracePeriod=30 Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.797421 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.797636 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="9887f684-7a28-4281-a5bb-eeff2f94685b" containerName="nova-scheduler-scheduler" containerID="cri-o://f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c" gracePeriod=30 Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.847869 5014 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.848687 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8585acea-0fdc-4307-b39c-f98d2d50f03b" containerName="nova-metadata-log" containerID="cri-o://3cc32cc11df57713d2f0c869d7991655573a9987934687034e1040719c0f4388" gracePeriod=30 Oct 06 21:52:12 crc kubenswrapper[5014]: I1006 21:52:12.848992 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8585acea-0fdc-4307-b39c-f98d2d50f03b" containerName="nova-metadata-metadata" containerID="cri-o://1e6fbd1229c040b8597fa85ce50f7ab2492b64339a12b813195106004bd67cf2" gracePeriod=30 Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.469064 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.572299 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-public-tls-certs\") pod \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.572869 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlxkl\" (UniqueName: \"kubernetes.io/projected/97ca4b1b-5ee1-4adf-be50-3c7101840e66-kube-api-access-vlxkl\") pod \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.572905 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-config-data\") pod \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.572950 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97ca4b1b-5ee1-4adf-be50-3c7101840e66-logs\") pod \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.573075 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-internal-tls-certs\") pod \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.573116 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-combined-ca-bundle\") pod \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\" (UID: \"97ca4b1b-5ee1-4adf-be50-3c7101840e66\") " Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.573400 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97ca4b1b-5ee1-4adf-be50-3c7101840e66-logs" (OuterVolumeSpecName: "logs") pod "97ca4b1b-5ee1-4adf-be50-3c7101840e66" (UID: "97ca4b1b-5ee1-4adf-be50-3c7101840e66"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.573803 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97ca4b1b-5ee1-4adf-be50-3c7101840e66-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.578981 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97ca4b1b-5ee1-4adf-be50-3c7101840e66-kube-api-access-vlxkl" (OuterVolumeSpecName: "kube-api-access-vlxkl") pod "97ca4b1b-5ee1-4adf-be50-3c7101840e66" (UID: "97ca4b1b-5ee1-4adf-be50-3c7101840e66"). InnerVolumeSpecName "kube-api-access-vlxkl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.605403 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97ca4b1b-5ee1-4adf-be50-3c7101840e66" (UID: "97ca4b1b-5ee1-4adf-be50-3c7101840e66"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.613356 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-config-data" (OuterVolumeSpecName: "config-data") pod "97ca4b1b-5ee1-4adf-be50-3c7101840e66" (UID: "97ca4b1b-5ee1-4adf-be50-3c7101840e66"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.623800 5014 generic.go:334] "Generic (PLEG): container finished" podID="8585acea-0fdc-4307-b39c-f98d2d50f03b" containerID="3cc32cc11df57713d2f0c869d7991655573a9987934687034e1040719c0f4388" exitCode=143 Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.623872 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8585acea-0fdc-4307-b39c-f98d2d50f03b","Type":"ContainerDied","Data":"3cc32cc11df57713d2f0c869d7991655573a9987934687034e1040719c0f4388"} Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.629615 5014 generic.go:334] "Generic (PLEG): container finished" podID="97ca4b1b-5ee1-4adf-be50-3c7101840e66" containerID="b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e" exitCode=0 Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.629679 5014 generic.go:334] "Generic (PLEG): container finished" podID="97ca4b1b-5ee1-4adf-be50-3c7101840e66" containerID="2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002" exitCode=143 Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.629731 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"97ca4b1b-5ee1-4adf-be50-3c7101840e66","Type":"ContainerDied","Data":"b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e"} Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.629763 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"97ca4b1b-5ee1-4adf-be50-3c7101840e66","Type":"ContainerDied","Data":"2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002"} Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.629778 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"97ca4b1b-5ee1-4adf-be50-3c7101840e66","Type":"ContainerDied","Data":"4da6c336351568efe00a6c438328748c0c6aea8c1b99a56af86c12f8d79e287f"} Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.629796 5014 scope.go:117] "RemoveContainer" containerID="b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.629987 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.633537 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "97ca4b1b-5ee1-4adf-be50-3c7101840e66" (UID: "97ca4b1b-5ee1-4adf-be50-3c7101840e66"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.635105 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"32e89ff3-0d60-4fd1-9c0d-831b92311165","Type":"ContainerStarted","Data":"1c78227e99eced5448ed246e1bcbef1171c8d55eecc061f23758f282026f4fed"} Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.635284 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.649934 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "97ca4b1b-5ee1-4adf-be50-3c7101840e66" (UID: "97ca4b1b-5ee1-4adf-be50-3c7101840e66"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.673186 5014 scope.go:117] "RemoveContainer" containerID="2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.675332 5014 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.675356 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.675365 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlxkl\" (UniqueName: \"kubernetes.io/projected/97ca4b1b-5ee1-4adf-be50-3c7101840e66-kube-api-access-vlxkl\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.675375 5014 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.675383 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97ca4b1b-5ee1-4adf-be50-3c7101840e66-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.698362 5014 scope.go:117] "RemoveContainer" containerID="b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e" Oct 06 21:52:13 crc kubenswrapper[5014]: E1006 21:52:13.698955 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e\": container with ID starting with b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e not found: ID does not exist" containerID="b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.698987 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e"} err="failed to get container status \"b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e\": rpc error: code = NotFound desc = could not find container \"b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e\": container with ID starting with b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e not found: ID does not exist" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.699011 5014 scope.go:117] "RemoveContainer" containerID="2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002" Oct 06 21:52:13 crc kubenswrapper[5014]: E1006 21:52:13.699519 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002\": container with ID starting with 2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002 not found: ID does not exist" containerID="2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.699573 5014 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002"} err="failed to get container status \"2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002\": rpc error: code = NotFound desc = could not find container \"2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002\": container with ID starting with 2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002 not found: ID does not exist" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.699600 5014 scope.go:117] "RemoveContainer" containerID="b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.699920 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e"} err="failed to get container status \"b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e\": rpc error: code = NotFound desc = could not find container \"b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e\": container with ID starting with b868e7fe88058f4bb16873494bb6ecfab9ce38f49d926be4fab261428b73e05e not found: ID does not exist" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.700024 5014 scope.go:117] "RemoveContainer" containerID="2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.700395 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002"} err="failed to get container status \"2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002\": rpc error: code = NotFound desc = could not find container \"2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002\": container with ID starting with 2ce598a393ef629266ab36a007bccf7d359f5c221af62cdac62705a980168002 not found: ID does not exist" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.967710 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.164521837 podStartE2EDuration="5.967689027s" podCreationTimestamp="2025-10-06 21:52:08 +0000 UTC" firstStartedPulling="2025-10-06 21:52:09.494250882 +0000 UTC m=+1274.787287616" lastFinishedPulling="2025-10-06 21:52:13.297418072 +0000 UTC m=+1278.590454806" observedRunningTime="2025-10-06 21:52:13.663440986 +0000 UTC m=+1278.956477720" watchObservedRunningTime="2025-10-06 21:52:13.967689027 +0000 UTC m=+1279.260725761" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.971301 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.978484 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.992912 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Oct 06 21:52:13 crc kubenswrapper[5014]: E1006 21:52:13.993317 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="388c3436-9432-48f2-ab10-6741959aebf5" containerName="nova-manage" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.993336 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="388c3436-9432-48f2-ab10-6741959aebf5" containerName="nova-manage" Oct 06 21:52:13 crc kubenswrapper[5014]: E1006 
21:52:13.993348 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97ca4b1b-5ee1-4adf-be50-3c7101840e66" containerName="nova-api-log" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.993355 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="97ca4b1b-5ee1-4adf-be50-3c7101840e66" containerName="nova-api-log" Oct 06 21:52:13 crc kubenswrapper[5014]: E1006 21:52:13.993388 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97ca4b1b-5ee1-4adf-be50-3c7101840e66" containerName="nova-api-api" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.993396 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="97ca4b1b-5ee1-4adf-be50-3c7101840e66" containerName="nova-api-api" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.993561 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="97ca4b1b-5ee1-4adf-be50-3c7101840e66" containerName="nova-api-api" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.993572 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="97ca4b1b-5ee1-4adf-be50-3c7101840e66" containerName="nova-api-log" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.993592 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="388c3436-9432-48f2-ab10-6741959aebf5" containerName="nova-manage" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.994589 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.997399 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.997857 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Oct 06 21:52:13 crc kubenswrapper[5014]: I1006 21:52:13.998108 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.008387 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.083759 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.083898 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-config-data\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.083939 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c1e8f70-227b-40e0-aceb-470eed382180-logs\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.083977 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-internal-tls-certs\") pod \"nova-api-0\" (UID: 
\"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.084092 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-public-tls-certs\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.084147 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhkvp\" (UniqueName: \"kubernetes.io/projected/3c1e8f70-227b-40e0-aceb-470eed382180-kube-api-access-vhkvp\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.186070 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-config-data\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.186339 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c1e8f70-227b-40e0-aceb-470eed382180-logs\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.186443 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.186571 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-public-tls-certs\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.186711 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhkvp\" (UniqueName: \"kubernetes.io/projected/3c1e8f70-227b-40e0-aceb-470eed382180-kube-api-access-vhkvp\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.186855 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.187317 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c1e8f70-227b-40e0-aceb-470eed382180-logs\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.191032 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-internal-tls-certs\") pod \"nova-api-0\" (UID: 
\"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.191421 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-public-tls-certs\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.192203 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-config-data\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.193357 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.212806 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhkvp\" (UniqueName: \"kubernetes.io/projected/3c1e8f70-227b-40e0-aceb-470eed382180-kube-api-access-vhkvp\") pod \"nova-api-0\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: I1006 21:52:14.358505 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:52:14 crc kubenswrapper[5014]: E1006 21:52:14.375138 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 06 21:52:14 crc kubenswrapper[5014]: E1006 21:52:14.377497 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 06 21:52:14 crc kubenswrapper[5014]: E1006 21:52:14.379516 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 06 21:52:14 crc kubenswrapper[5014]: E1006 21:52:14.379562 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="9887f684-7a28-4281-a5bb-eeff2f94685b" containerName="nova-scheduler-scheduler" Oct 06 21:52:15 crc kubenswrapper[5014]: I1006 21:52:14.870445 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:52:15 crc kubenswrapper[5014]: I1006 21:52:15.502326 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97ca4b1b-5ee1-4adf-be50-3c7101840e66" 
path="/var/lib/kubelet/pods/97ca4b1b-5ee1-4adf-be50-3c7101840e66/volumes" Oct 06 21:52:15 crc kubenswrapper[5014]: I1006 21:52:15.656831 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3c1e8f70-227b-40e0-aceb-470eed382180","Type":"ContainerStarted","Data":"b619016e22f6883fb4aa46369b1d687e057d2e2ffd28c9ca34b55365fb9839c5"} Oct 06 21:52:15 crc kubenswrapper[5014]: I1006 21:52:15.657084 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3c1e8f70-227b-40e0-aceb-470eed382180","Type":"ContainerStarted","Data":"1427a4912ecba9f84ea4a41b472f8f218b0c4df4bc3dd48139b02a15ecfe2a67"} Oct 06 21:52:15 crc kubenswrapper[5014]: I1006 21:52:15.657160 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3c1e8f70-227b-40e0-aceb-470eed382180","Type":"ContainerStarted","Data":"b53b6ef9cb749992ee048456f7510b1eed0642e98177c348d1062f39223aeac6"} Oct 06 21:52:15 crc kubenswrapper[5014]: I1006 21:52:15.687516 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.687491101 podStartE2EDuration="2.687491101s" podCreationTimestamp="2025-10-06 21:52:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:52:15.680389291 +0000 UTC m=+1280.973426045" watchObservedRunningTime="2025-10-06 21:52:15.687491101 +0000 UTC m=+1280.980527835" Oct 06 21:52:15 crc kubenswrapper[5014]: I1006 21:52:15.986999 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="8585acea-0fdc-4307-b39c-f98d2d50f03b" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.192:8775/\": read tcp 10.217.0.2:45224->10.217.0.192:8775: read: connection reset by peer" Oct 06 21:52:15 crc kubenswrapper[5014]: I1006 21:52:15.987007 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="8585acea-0fdc-4307-b39c-f98d2d50f03b" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.192:8775/\": read tcp 10.217.0.2:45222->10.217.0.192:8775: read: connection reset by peer" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.441679 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.540222 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-combined-ca-bundle\") pod \"8585acea-0fdc-4307-b39c-f98d2d50f03b\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.540487 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7hpc\" (UniqueName: \"kubernetes.io/projected/8585acea-0fdc-4307-b39c-f98d2d50f03b-kube-api-access-j7hpc\") pod \"8585acea-0fdc-4307-b39c-f98d2d50f03b\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.540535 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-config-data\") pod \"8585acea-0fdc-4307-b39c-f98d2d50f03b\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.540657 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8585acea-0fdc-4307-b39c-f98d2d50f03b-logs\") pod \"8585acea-0fdc-4307-b39c-f98d2d50f03b\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.540696 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-nova-metadata-tls-certs\") pod \"8585acea-0fdc-4307-b39c-f98d2d50f03b\" (UID: \"8585acea-0fdc-4307-b39c-f98d2d50f03b\") " Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.543127 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8585acea-0fdc-4307-b39c-f98d2d50f03b-logs" (OuterVolumeSpecName: "logs") pod "8585acea-0fdc-4307-b39c-f98d2d50f03b" (UID: "8585acea-0fdc-4307-b39c-f98d2d50f03b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.553890 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8585acea-0fdc-4307-b39c-f98d2d50f03b-kube-api-access-j7hpc" (OuterVolumeSpecName: "kube-api-access-j7hpc") pod "8585acea-0fdc-4307-b39c-f98d2d50f03b" (UID: "8585acea-0fdc-4307-b39c-f98d2d50f03b"). InnerVolumeSpecName "kube-api-access-j7hpc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.591716 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8585acea-0fdc-4307-b39c-f98d2d50f03b" (UID: "8585acea-0fdc-4307-b39c-f98d2d50f03b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.593289 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-config-data" (OuterVolumeSpecName: "config-data") pod "8585acea-0fdc-4307-b39c-f98d2d50f03b" (UID: "8585acea-0fdc-4307-b39c-f98d2d50f03b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.610793 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "8585acea-0fdc-4307-b39c-f98d2d50f03b" (UID: "8585acea-0fdc-4307-b39c-f98d2d50f03b"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.644138 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.644181 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7hpc\" (UniqueName: \"kubernetes.io/projected/8585acea-0fdc-4307-b39c-f98d2d50f03b-kube-api-access-j7hpc\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.644195 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.644203 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8585acea-0fdc-4307-b39c-f98d2d50f03b-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.644213 5014 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8585acea-0fdc-4307-b39c-f98d2d50f03b-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.668119 5014 generic.go:334] "Generic (PLEG): container finished" podID="8585acea-0fdc-4307-b39c-f98d2d50f03b" containerID="1e6fbd1229c040b8597fa85ce50f7ab2492b64339a12b813195106004bd67cf2" exitCode=0 Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.668374 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8585acea-0fdc-4307-b39c-f98d2d50f03b","Type":"ContainerDied","Data":"1e6fbd1229c040b8597fa85ce50f7ab2492b64339a12b813195106004bd67cf2"} Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.668421 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8585acea-0fdc-4307-b39c-f98d2d50f03b","Type":"ContainerDied","Data":"76ac17fa038545a5a76ef4bc4107a7b4c1f5e6b4a6c3a631b9fd2cb34902e80f"} Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.668440 5014 scope.go:117] "RemoveContainer" containerID="1e6fbd1229c040b8597fa85ce50f7ab2492b64339a12b813195106004bd67cf2" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.668780 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.699456 5014 scope.go:117] "RemoveContainer" containerID="3cc32cc11df57713d2f0c869d7991655573a9987934687034e1040719c0f4388" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.705804 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.727536 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.731934 5014 scope.go:117] "RemoveContainer" containerID="1e6fbd1229c040b8597fa85ce50f7ab2492b64339a12b813195106004bd67cf2" Oct 06 21:52:16 crc kubenswrapper[5014]: E1006 21:52:16.732848 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e6fbd1229c040b8597fa85ce50f7ab2492b64339a12b813195106004bd67cf2\": container with ID starting with 1e6fbd1229c040b8597fa85ce50f7ab2492b64339a12b813195106004bd67cf2 not found: ID does not exist" containerID="1e6fbd1229c040b8597fa85ce50f7ab2492b64339a12b813195106004bd67cf2" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.732898 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e6fbd1229c040b8597fa85ce50f7ab2492b64339a12b813195106004bd67cf2"} err="failed to get container status \"1e6fbd1229c040b8597fa85ce50f7ab2492b64339a12b813195106004bd67cf2\": rpc error: code = NotFound desc = could not find container \"1e6fbd1229c040b8597fa85ce50f7ab2492b64339a12b813195106004bd67cf2\": container with ID starting with 1e6fbd1229c040b8597fa85ce50f7ab2492b64339a12b813195106004bd67cf2 not found: ID does not exist" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.732924 5014 scope.go:117] "RemoveContainer" containerID="3cc32cc11df57713d2f0c869d7991655573a9987934687034e1040719c0f4388" Oct 06 21:52:16 crc kubenswrapper[5014]: E1006 21:52:16.733287 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cc32cc11df57713d2f0c869d7991655573a9987934687034e1040719c0f4388\": container with ID starting with 3cc32cc11df57713d2f0c869d7991655573a9987934687034e1040719c0f4388 not found: ID does not exist" containerID="3cc32cc11df57713d2f0c869d7991655573a9987934687034e1040719c0f4388" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.733358 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cc32cc11df57713d2f0c869d7991655573a9987934687034e1040719c0f4388"} err="failed to get container status \"3cc32cc11df57713d2f0c869d7991655573a9987934687034e1040719c0f4388\": rpc error: code = NotFound desc = could not find container \"3cc32cc11df57713d2f0c869d7991655573a9987934687034e1040719c0f4388\": container with ID starting with 3cc32cc11df57713d2f0c869d7991655573a9987934687034e1040719c0f4388 not found: ID does not exist" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.744332 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:52:16 crc kubenswrapper[5014]: E1006 21:52:16.747418 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8585acea-0fdc-4307-b39c-f98d2d50f03b" containerName="nova-metadata-metadata" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.747645 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8585acea-0fdc-4307-b39c-f98d2d50f03b" 
containerName="nova-metadata-metadata" Oct 06 21:52:16 crc kubenswrapper[5014]: E1006 21:52:16.747759 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8585acea-0fdc-4307-b39c-f98d2d50f03b" containerName="nova-metadata-log" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.747838 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8585acea-0fdc-4307-b39c-f98d2d50f03b" containerName="nova-metadata-log" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.748347 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="8585acea-0fdc-4307-b39c-f98d2d50f03b" containerName="nova-metadata-log" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.748792 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="8585acea-0fdc-4307-b39c-f98d2d50f03b" containerName="nova-metadata-metadata" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.750281 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.754672 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.754744 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.756693 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.848805 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.849165 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1dfeacf8-a072-4b44-bed9-618acd31fb6f-logs\") pod \"nova-metadata-0\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.849287 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpwq4\" (UniqueName: \"kubernetes.io/projected/1dfeacf8-a072-4b44-bed9-618acd31fb6f-kube-api-access-kpwq4\") pod \"nova-metadata-0\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.849487 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.849573 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-config-data\") pod \"nova-metadata-0\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.952107 5014 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.952195 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-config-data\") pod \"nova-metadata-0\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.952286 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1dfeacf8-a072-4b44-bed9-618acd31fb6f-logs\") pod \"nova-metadata-0\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.952314 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.952351 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpwq4\" (UniqueName: \"kubernetes.io/projected/1dfeacf8-a072-4b44-bed9-618acd31fb6f-kube-api-access-kpwq4\") pod \"nova-metadata-0\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.952942 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1dfeacf8-a072-4b44-bed9-618acd31fb6f-logs\") pod \"nova-metadata-0\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.955870 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-config-data\") pod \"nova-metadata-0\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.956533 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.956758 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " pod="openstack/nova-metadata-0" Oct 06 21:52:16 crc kubenswrapper[5014]: I1006 21:52:16.972006 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpwq4\" (UniqueName: \"kubernetes.io/projected/1dfeacf8-a072-4b44-bed9-618acd31fb6f-kube-api-access-kpwq4\") pod \"nova-metadata-0\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " pod="openstack/nova-metadata-0" Oct 06 21:52:17 crc kubenswrapper[5014]: I1006 
21:52:17.071601 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 06 21:52:17 crc kubenswrapper[5014]: I1006 21:52:17.501968 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8585acea-0fdc-4307-b39c-f98d2d50f03b" path="/var/lib/kubelet/pods/8585acea-0fdc-4307-b39c-f98d2d50f03b/volumes" Oct 06 21:52:17 crc kubenswrapper[5014]: I1006 21:52:17.565727 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:52:17 crc kubenswrapper[5014]: W1006 21:52:17.585872 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1dfeacf8_a072_4b44_bed9_618acd31fb6f.slice/crio-73bb5c470d731091eacea54b1e2fbb3b855ee9d6a47fd0909ea739c6ea8256af WatchSource:0}: Error finding container 73bb5c470d731091eacea54b1e2fbb3b855ee9d6a47fd0909ea739c6ea8256af: Status 404 returned error can't find the container with id 73bb5c470d731091eacea54b1e2fbb3b855ee9d6a47fd0909ea739c6ea8256af Oct 06 21:52:17 crc kubenswrapper[5014]: I1006 21:52:17.676987 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1dfeacf8-a072-4b44-bed9-618acd31fb6f","Type":"ContainerStarted","Data":"73bb5c470d731091eacea54b1e2fbb3b855ee9d6a47fd0909ea739c6ea8256af"} Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.641686 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.688752 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9887f684-7a28-4281-a5bb-eeff2f94685b-combined-ca-bundle\") pod \"9887f684-7a28-4281-a5bb-eeff2f94685b\" (UID: \"9887f684-7a28-4281-a5bb-eeff2f94685b\") " Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.688907 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6lgwm\" (UniqueName: \"kubernetes.io/projected/9887f684-7a28-4281-a5bb-eeff2f94685b-kube-api-access-6lgwm\") pod \"9887f684-7a28-4281-a5bb-eeff2f94685b\" (UID: \"9887f684-7a28-4281-a5bb-eeff2f94685b\") " Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.688971 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9887f684-7a28-4281-a5bb-eeff2f94685b-config-data\") pod \"9887f684-7a28-4281-a5bb-eeff2f94685b\" (UID: \"9887f684-7a28-4281-a5bb-eeff2f94685b\") " Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.698493 5014 generic.go:334] "Generic (PLEG): container finished" podID="9887f684-7a28-4281-a5bb-eeff2f94685b" containerID="f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c" exitCode=0 Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.698598 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9887f684-7a28-4281-a5bb-eeff2f94685b","Type":"ContainerDied","Data":"f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c"} Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.698664 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9887f684-7a28-4281-a5bb-eeff2f94685b","Type":"ContainerDied","Data":"a3f6fb045e8a90927e32a13ae015011e60668ab54b2f3b2747c2f6547a0120d8"} Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.698685 5014 
scope.go:117] "RemoveContainer" containerID="f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c" Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.698825 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.700180 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9887f684-7a28-4281-a5bb-eeff2f94685b-kube-api-access-6lgwm" (OuterVolumeSpecName: "kube-api-access-6lgwm") pod "9887f684-7a28-4281-a5bb-eeff2f94685b" (UID: "9887f684-7a28-4281-a5bb-eeff2f94685b"). InnerVolumeSpecName "kube-api-access-6lgwm". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.707984 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1dfeacf8-a072-4b44-bed9-618acd31fb6f","Type":"ContainerStarted","Data":"6d9c705385f5fdafb7775a2b08e388c98d6323f0cc8e8398d03748b74369b48a"} Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.708031 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1dfeacf8-a072-4b44-bed9-618acd31fb6f","Type":"ContainerStarted","Data":"adb9b9301193619d4a64043aa363957a79778e3963acf98e99151a9c5d6dc68d"} Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.726505 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9887f684-7a28-4281-a5bb-eeff2f94685b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9887f684-7a28-4281-a5bb-eeff2f94685b" (UID: "9887f684-7a28-4281-a5bb-eeff2f94685b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.749456 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9887f684-7a28-4281-a5bb-eeff2f94685b-config-data" (OuterVolumeSpecName: "config-data") pod "9887f684-7a28-4281-a5bb-eeff2f94685b" (UID: "9887f684-7a28-4281-a5bb-eeff2f94685b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.755196 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.755172532 podStartE2EDuration="2.755172532s" podCreationTimestamp="2025-10-06 21:52:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:52:18.744754215 +0000 UTC m=+1284.037790959" watchObservedRunningTime="2025-10-06 21:52:18.755172532 +0000 UTC m=+1284.048209286" Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.773260 5014 scope.go:117] "RemoveContainer" containerID="f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c" Oct 06 21:52:18 crc kubenswrapper[5014]: E1006 21:52:18.774108 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c\": container with ID starting with f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c not found: ID does not exist" containerID="f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c" Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.774154 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c"} err="failed to get container status \"f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c\": rpc error: code = NotFound desc = could not find container \"f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c\": container with ID starting with f33b11cca1a82c7d2a724565d5e19118456d6015d8f46a5efe12985f45daec0c not found: ID does not exist" Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.792538 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6lgwm\" (UniqueName: \"kubernetes.io/projected/9887f684-7a28-4281-a5bb-eeff2f94685b-kube-api-access-6lgwm\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.792813 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9887f684-7a28-4281-a5bb-eeff2f94685b-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:18 crc kubenswrapper[5014]: I1006 21:52:18.792856 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9887f684-7a28-4281-a5bb-eeff2f94685b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.037822 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.092680 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.116907 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:52:19 crc kubenswrapper[5014]: E1006 21:52:19.117529 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9887f684-7a28-4281-a5bb-eeff2f94685b" containerName="nova-scheduler-scheduler" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.117553 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9887f684-7a28-4281-a5bb-eeff2f94685b" containerName="nova-scheduler-scheduler" Oct 06 21:52:19 crc 
kubenswrapper[5014]: I1006 21:52:19.117834 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="9887f684-7a28-4281-a5bb-eeff2f94685b" containerName="nova-scheduler-scheduler" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.118790 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.121252 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.135476 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.208959 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cefbd6b6-3aa3-459d-9f8d-060736f4de92-config-data\") pod \"nova-scheduler-0\" (UID: \"cefbd6b6-3aa3-459d-9f8d-060736f4de92\") " pod="openstack/nova-scheduler-0" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.209114 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cefbd6b6-3aa3-459d-9f8d-060736f4de92-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cefbd6b6-3aa3-459d-9f8d-060736f4de92\") " pod="openstack/nova-scheduler-0" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.209188 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwckt\" (UniqueName: \"kubernetes.io/projected/cefbd6b6-3aa3-459d-9f8d-060736f4de92-kube-api-access-jwckt\") pod \"nova-scheduler-0\" (UID: \"cefbd6b6-3aa3-459d-9f8d-060736f4de92\") " pod="openstack/nova-scheduler-0" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.311136 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cefbd6b6-3aa3-459d-9f8d-060736f4de92-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cefbd6b6-3aa3-459d-9f8d-060736f4de92\") " pod="openstack/nova-scheduler-0" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.311249 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwckt\" (UniqueName: \"kubernetes.io/projected/cefbd6b6-3aa3-459d-9f8d-060736f4de92-kube-api-access-jwckt\") pod \"nova-scheduler-0\" (UID: \"cefbd6b6-3aa3-459d-9f8d-060736f4de92\") " pod="openstack/nova-scheduler-0" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.311297 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cefbd6b6-3aa3-459d-9f8d-060736f4de92-config-data\") pod \"nova-scheduler-0\" (UID: \"cefbd6b6-3aa3-459d-9f8d-060736f4de92\") " pod="openstack/nova-scheduler-0" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.319040 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cefbd6b6-3aa3-459d-9f8d-060736f4de92-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cefbd6b6-3aa3-459d-9f8d-060736f4de92\") " pod="openstack/nova-scheduler-0" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.321112 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/cefbd6b6-3aa3-459d-9f8d-060736f4de92-config-data\") pod \"nova-scheduler-0\" (UID: \"cefbd6b6-3aa3-459d-9f8d-060736f4de92\") " pod="openstack/nova-scheduler-0" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.336487 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwckt\" (UniqueName: \"kubernetes.io/projected/cefbd6b6-3aa3-459d-9f8d-060736f4de92-kube-api-access-jwckt\") pod \"nova-scheduler-0\" (UID: \"cefbd6b6-3aa3-459d-9f8d-060736f4de92\") " pod="openstack/nova-scheduler-0" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.440190 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.499323 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9887f684-7a28-4281-a5bb-eeff2f94685b" path="/var/lib/kubelet/pods/9887f684-7a28-4281-a5bb-eeff2f94685b/volumes" Oct 06 21:52:19 crc kubenswrapper[5014]: I1006 21:52:19.915219 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:52:19 crc kubenswrapper[5014]: W1006 21:52:19.916443 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcefbd6b6_3aa3_459d_9f8d_060736f4de92.slice/crio-358e1821e030ef51d6ba8f0efe6360a432df751a1831462c97c302d5fdb0faab WatchSource:0}: Error finding container 358e1821e030ef51d6ba8f0efe6360a432df751a1831462c97c302d5fdb0faab: Status 404 returned error can't find the container with id 358e1821e030ef51d6ba8f0efe6360a432df751a1831462c97c302d5fdb0faab Oct 06 21:52:20 crc kubenswrapper[5014]: I1006 21:52:20.728124 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cefbd6b6-3aa3-459d-9f8d-060736f4de92","Type":"ContainerStarted","Data":"b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb"} Oct 06 21:52:20 crc kubenswrapper[5014]: I1006 21:52:20.728845 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cefbd6b6-3aa3-459d-9f8d-060736f4de92","Type":"ContainerStarted","Data":"358e1821e030ef51d6ba8f0efe6360a432df751a1831462c97c302d5fdb0faab"} Oct 06 21:52:20 crc kubenswrapper[5014]: I1006 21:52:20.751236 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.7512177420000001 podStartE2EDuration="1.751217742s" podCreationTimestamp="2025-10-06 21:52:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:52:20.744212487 +0000 UTC m=+1286.037249221" watchObservedRunningTime="2025-10-06 21:52:20.751217742 +0000 UTC m=+1286.044254466" Oct 06 21:52:22 crc kubenswrapper[5014]: I1006 21:52:22.072173 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 06 21:52:22 crc kubenswrapper[5014]: I1006 21:52:22.072550 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 06 21:52:24 crc kubenswrapper[5014]: I1006 21:52:24.359546 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 06 21:52:24 crc kubenswrapper[5014]: I1006 21:52:24.360023 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 06 21:52:24 crc kubenswrapper[5014]: I1006 
21:52:24.440540 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Oct 06 21:52:25 crc kubenswrapper[5014]: I1006 21:52:25.371817 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3c1e8f70-227b-40e0-aceb-470eed382180" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.204:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 06 21:52:25 crc kubenswrapper[5014]: I1006 21:52:25.371817 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3c1e8f70-227b-40e0-aceb-470eed382180" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.204:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 06 21:52:27 crc kubenswrapper[5014]: I1006 21:52:27.071938 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Oct 06 21:52:27 crc kubenswrapper[5014]: I1006 21:52:27.072503 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Oct 06 21:52:28 crc kubenswrapper[5014]: I1006 21:52:28.088752 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="1dfeacf8-a072-4b44-bed9-618acd31fb6f" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 06 21:52:28 crc kubenswrapper[5014]: I1006 21:52:28.088792 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="1dfeacf8-a072-4b44-bed9-618acd31fb6f" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 06 21:52:29 crc kubenswrapper[5014]: I1006 21:52:29.441336 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Oct 06 21:52:29 crc kubenswrapper[5014]: I1006 21:52:29.498970 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Oct 06 21:52:29 crc kubenswrapper[5014]: I1006 21:52:29.865379 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Oct 06 21:52:34 crc kubenswrapper[5014]: I1006 21:52:34.365184 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 06 21:52:34 crc kubenswrapper[5014]: I1006 21:52:34.366037 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 06 21:52:34 crc kubenswrapper[5014]: I1006 21:52:34.368276 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 06 21:52:34 crc kubenswrapper[5014]: I1006 21:52:34.376941 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 06 21:52:34 crc kubenswrapper[5014]: I1006 21:52:34.889139 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 06 21:52:34 crc kubenswrapper[5014]: I1006 21:52:34.897320 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 06 21:52:37 crc kubenswrapper[5014]: I1006 21:52:37.090923 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openstack/nova-metadata-0" Oct 06 21:52:37 crc kubenswrapper[5014]: I1006 21:52:37.091423 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 06 21:52:37 crc kubenswrapper[5014]: I1006 21:52:37.098726 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 06 21:52:37 crc kubenswrapper[5014]: I1006 21:52:37.098910 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 06 21:52:39 crc kubenswrapper[5014]: I1006 21:52:39.001179 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Oct 06 21:53:01 crc kubenswrapper[5014]: I1006 21:53:01.540668 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Oct 06 21:53:01 crc kubenswrapper[5014]: I1006 21:53:01.541407 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="db1ef19c-c5bd-4bf1-a58c-73ff198caaa9" containerName="openstackclient" containerID="cri-o://085db7e17be44d9a79cea2381416abb7231f57828c4dbcfdbdfc446dac2fb6f0" gracePeriod=2 Oct 06 21:53:01 crc kubenswrapper[5014]: I1006 21:53:01.550659 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Oct 06 21:53:01 crc kubenswrapper[5014]: E1006 21:53:01.635918 5014 secret.go:188] Couldn't get secret openstack/glance-scripts: secret "glance-scripts" not found Oct 06 21:53:01 crc kubenswrapper[5014]: E1006 21:53:01.635988 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-scripts podName:4a3bf50b-cb91-4201-affd-0c42d3585df2 nodeName:}" failed. No retries permitted until 2025-10-06 21:53:02.135970722 +0000 UTC m=+1327.429007456 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-scripts") pod "glance-default-external-api-0" (UID: "4a3bf50b-cb91-4201-affd-0c42d3585df2") : secret "glance-scripts" not found Oct 06 21:53:01 crc kubenswrapper[5014]: I1006 21:53:01.755612 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 21:53:01 crc kubenswrapper[5014]: I1006 21:53:01.782762 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-f4vpp"] Oct 06 21:53:01 crc kubenswrapper[5014]: I1006 21:53:01.841154 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-9zq5k"] Oct 06 21:53:01 crc kubenswrapper[5014]: I1006 21:53:01.841353 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-9zq5k" podUID="0d674559-08b5-41c9-8783-a5e42504fb3e" containerName="openstack-network-exporter" containerID="cri-o://4548df78c36932efb3465251204787ed31111710ed70b8eb0c470d23a627a6c7" gracePeriod=30 Oct 06 21:53:01 crc kubenswrapper[5014]: I1006 21:53:01.952562 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-fwbdt"] Oct 06 21:53:02 crc kubenswrapper[5014]: E1006 21:53:02.066077 5014 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Oct 06 21:53:02 crc kubenswrapper[5014]: E1006 21:53:02.066164 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data podName:c8f59d7d-f71b-46b0-bd32-476a2517a3b6 nodeName:}" failed. No retries permitted until 2025-10-06 21:53:02.566134825 +0000 UTC m=+1327.859171559 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data") pod "rabbitmq-cell1-server-0" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6") : configmap "rabbitmq-cell1-config-data" not found Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.108762 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder3af6-account-delete-44b4q"] Oct 06 21:53:02 crc kubenswrapper[5014]: E1006 21:53:02.109206 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db1ef19c-c5bd-4bf1-a58c-73ff198caaa9" containerName="openstackclient" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.109223 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="db1ef19c-c5bd-4bf1-a58c-73ff198caaa9" containerName="openstackclient" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.109447 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="db1ef19c-c5bd-4bf1-a58c-73ff198caaa9" containerName="openstackclient" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.110113 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder3af6-account-delete-44b4q" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.123681 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance6d2b-account-delete-n47pm"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.124941 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance6d2b-account-delete-n47pm" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.133250 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder3af6-account-delete-44b4q"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.143414 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance6d2b-account-delete-n47pm"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.166699 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nbkx\" (UniqueName: \"kubernetes.io/projected/2d91cde4-29d5-4947-b2ee-73e29ac244c2-kube-api-access-5nbkx\") pod \"cinder3af6-account-delete-44b4q\" (UID: \"2d91cde4-29d5-4947-b2ee-73e29ac244c2\") " pod="openstack/cinder3af6-account-delete-44b4q" Oct 06 21:53:02 crc kubenswrapper[5014]: E1006 21:53:02.166872 5014 secret.go:188] Couldn't get secret openstack/glance-scripts: secret "glance-scripts" not found Oct 06 21:53:02 crc kubenswrapper[5014]: E1006 21:53:02.166919 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-scripts podName:4a3bf50b-cb91-4201-affd-0c42d3585df2 nodeName:}" failed. No retries permitted until 2025-10-06 21:53:03.166900575 +0000 UTC m=+1328.459937309 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-scripts") pod "glance-default-external-api-0" (UID: "4a3bf50b-cb91-4201-affd-0c42d3585df2") : secret "glance-scripts" not found Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.203997 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-79rxq"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.227606 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-79rxq"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.259598 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-9zq5k_0d674559-08b5-41c9-8783-a5e42504fb3e/openstack-network-exporter/0.log" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.259652 5014 generic.go:334] "Generic (PLEG): container finished" podID="0d674559-08b5-41c9-8783-a5e42504fb3e" containerID="4548df78c36932efb3465251204787ed31111710ed70b8eb0c470d23a627a6c7" exitCode=2 Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.259679 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-9zq5k" event={"ID":"0d674559-08b5-41c9-8783-a5e42504fb3e","Type":"ContainerDied","Data":"4548df78c36932efb3465251204787ed31111710ed70b8eb0c470d23a627a6c7"} Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.265198 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.267708 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5ld4\" (UniqueName: \"kubernetes.io/projected/c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c-kube-api-access-g5ld4\") pod \"glance6d2b-account-delete-n47pm\" (UID: \"c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c\") " pod="openstack/glance6d2b-account-delete-n47pm" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.267758 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nbkx\" (UniqueName: 
\"kubernetes.io/projected/2d91cde4-29d5-4947-b2ee-73e29ac244c2-kube-api-access-5nbkx\") pod \"cinder3af6-account-delete-44b4q\" (UID: \"2d91cde4-29d5-4947-b2ee-73e29ac244c2\") " pod="openstack/cinder3af6-account-delete-44b4q" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.274713 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbicanb6a2-account-delete-c6b5p"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.276043 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbicanb6a2-account-delete-c6b5p" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.315791 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbicanb6a2-account-delete-c6b5p"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.317726 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nbkx\" (UniqueName: \"kubernetes.io/projected/2d91cde4-29d5-4947-b2ee-73e29ac244c2-kube-api-access-5nbkx\") pod \"cinder3af6-account-delete-44b4q\" (UID: \"2d91cde4-29d5-4947-b2ee-73e29ac244c2\") " pod="openstack/cinder3af6-account-delete-44b4q" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.326519 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell0ce1b-account-delete-p9nfs"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.328119 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0ce1b-account-delete-p9nfs" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.337169 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0ce1b-account-delete-p9nfs"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.365687 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-jc6q5"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.371787 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sll76\" (UniqueName: \"kubernetes.io/projected/6ca96007-c66c-4e95-84b7-12eff893cca2-kube-api-access-sll76\") pod \"barbicanb6a2-account-delete-c6b5p\" (UID: \"6ca96007-c66c-4e95-84b7-12eff893cca2\") " pod="openstack/barbicanb6a2-account-delete-c6b5p" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.371962 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5ld4\" (UniqueName: \"kubernetes.io/projected/c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c-kube-api-access-g5ld4\") pod \"glance6d2b-account-delete-n47pm\" (UID: \"c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c\") " pod="openstack/glance6d2b-account-delete-n47pm" Oct 06 21:53:02 crc kubenswrapper[5014]: E1006 21:53:02.372434 5014 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Oct 06 21:53:02 crc kubenswrapper[5014]: E1006 21:53:02.372504 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data podName:4b977fc8-6c11-41e6-9500-f0da2d66aea1 nodeName:}" failed. No retries permitted until 2025-10-06 21:53:02.872482235 +0000 UTC m=+1328.165519069 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data") pod "rabbitmq-server-0" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1") : configmap "rabbitmq-config-data" not found Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.413222 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5ld4\" (UniqueName: \"kubernetes.io/projected/c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c-kube-api-access-g5ld4\") pod \"glance6d2b-account-delete-n47pm\" (UID: \"c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c\") " pod="openstack/glance6d2b-account-delete-n47pm" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.413305 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-jc6q5"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.434725 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder3af6-account-delete-44b4q" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.458684 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.458957 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" containerName="ovn-northd" containerID="cri-o://a5aaa8e6da3bb1475013fcf9505c94fb4f157784a202b67b1fafdd6706d374db" gracePeriod=30 Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.459351 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" containerName="openstack-network-exporter" containerID="cri-o://d9b24ea6635bde477fa710678733d33fd5f1e17d35ee8b466a9fe4445920964c" gracePeriod=30 Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.460001 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance6d2b-account-delete-n47pm" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.474799 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sll76\" (UniqueName: \"kubernetes.io/projected/6ca96007-c66c-4e95-84b7-12eff893cca2-kube-api-access-sll76\") pod \"barbicanb6a2-account-delete-c6b5p\" (UID: \"6ca96007-c66c-4e95-84b7-12eff893cca2\") " pod="openstack/barbicanb6a2-account-delete-c6b5p" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.474904 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6mlf\" (UniqueName: \"kubernetes.io/projected/17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43-kube-api-access-b6mlf\") pod \"novacell0ce1b-account-delete-p9nfs\" (UID: \"17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43\") " pod="openstack/novacell0ce1b-account-delete-p9nfs" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.513169 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sll76\" (UniqueName: \"kubernetes.io/projected/6ca96007-c66c-4e95-84b7-12eff893cca2-kube-api-access-sll76\") pod \"barbicanb6a2-account-delete-c6b5p\" (UID: \"6ca96007-c66c-4e95-84b7-12eff893cca2\") " pod="openstack/barbicanb6a2-account-delete-c6b5p" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.517166 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell1b5d1-account-delete-2bzts"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.519141 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell1b5d1-account-delete-2bzts" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.576429 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phjfc\" (UniqueName: \"kubernetes.io/projected/044b7af9-01a7-40c1-803c-30e568aaf1fe-kube-api-access-phjfc\") pod \"novacell1b5d1-account-delete-2bzts\" (UID: \"044b7af9-01a7-40c1-803c-30e568aaf1fe\") " pod="openstack/novacell1b5d1-account-delete-2bzts" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.576937 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6mlf\" (UniqueName: \"kubernetes.io/projected/17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43-kube-api-access-b6mlf\") pod \"novacell0ce1b-account-delete-p9nfs\" (UID: \"17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43\") " pod="openstack/novacell0ce1b-account-delete-p9nfs" Oct 06 21:53:02 crc kubenswrapper[5014]: E1006 21:53:02.595220 5014 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Oct 06 21:53:02 crc kubenswrapper[5014]: E1006 21:53:02.595273 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data podName:c8f59d7d-f71b-46b0-bd32-476a2517a3b6 nodeName:}" failed. No retries permitted until 2025-10-06 21:53:03.595257989 +0000 UTC m=+1328.888294723 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data") pod "rabbitmq-cell1-server-0" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6") : configmap "rabbitmq-cell1-config-data" not found Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.595428 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell1b5d1-account-delete-2bzts"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.630770 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbicanb6a2-account-delete-c6b5p" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.659444 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6mlf\" (UniqueName: \"kubernetes.io/projected/17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43-kube-api-access-b6mlf\") pod \"novacell0ce1b-account-delete-p9nfs\" (UID: \"17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43\") " pod="openstack/novacell0ce1b-account-delete-p9nfs" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.659931 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novaapi4b16-account-delete-jhqrl"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.661644 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapi4b16-account-delete-jhqrl" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.678202 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phjfc\" (UniqueName: \"kubernetes.io/projected/044b7af9-01a7-40c1-803c-30e568aaf1fe-kube-api-access-phjfc\") pod \"novacell1b5d1-account-delete-2bzts\" (UID: \"044b7af9-01a7-40c1-803c-30e568aaf1fe\") " pod="openstack/novacell1b5d1-account-delete-2bzts" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.681170 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell0ce1b-account-delete-p9nfs" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.703804 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapi4b16-account-delete-jhqrl"] Oct 06 21:53:02 crc kubenswrapper[5014]: E1006 21:53:02.707257 5014 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-f4vpp" message=< Oct 06 21:53:02 crc kubenswrapper[5014]: Exiting ovn-controller (1) [ OK ] Oct 06 21:53:02 crc kubenswrapper[5014]: > Oct 06 21:53:02 crc kubenswrapper[5014]: E1006 21:53:02.707298 5014 kuberuntime_container.go:691] "PreStop hook failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " pod="openstack/ovn-controller-f4vpp" podUID="74db136d-3445-4a7e-bcae-4645888ec806" containerName="ovn-controller" containerID="cri-o://f4279eb17667d99549974582ac2811d308ce6f778194f59eb6a9ca77feb9eca8" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.707339 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-f4vpp" podUID="74db136d-3445-4a7e-bcae-4645888ec806" containerName="ovn-controller" containerID="cri-o://f4279eb17667d99549974582ac2811d308ce6f778194f59eb6a9ca77feb9eca8" gracePeriod=30 Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.742142 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement8e33-account-delete-g7pdl"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.743400 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement8e33-account-delete-g7pdl" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.753059 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phjfc\" (UniqueName: \"kubernetes.io/projected/044b7af9-01a7-40c1-803c-30e568aaf1fe-kube-api-access-phjfc\") pod \"novacell1b5d1-account-delete-2bzts\" (UID: \"044b7af9-01a7-40c1-803c-30e568aaf1fe\") " pod="openstack/novacell1b5d1-account-delete-2bzts" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.795344 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement8e33-account-delete-g7pdl"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.798483 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxgjk\" (UniqueName: \"kubernetes.io/projected/d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349-kube-api-access-nxgjk\") pod \"novaapi4b16-account-delete-jhqrl\" (UID: \"d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349\") " pod="openstack/novaapi4b16-account-delete-jhqrl" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.798653 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sql4v\" (UniqueName: \"kubernetes.io/projected/6c301c8b-acb9-4008-9832-ce83dc524b6d-kube-api-access-sql4v\") pod \"placement8e33-account-delete-g7pdl\" (UID: \"6c301c8b-acb9-4008-9832-ce83dc524b6d\") " pod="openstack/placement8e33-account-delete-g7pdl" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.910720 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-62g9k"] Oct 06 21:53:02 crc kubenswrapper[5014]: E1006 21:53:02.912576 5014 configmap.go:193] Couldn't get configMap 
openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Oct 06 21:53:02 crc kubenswrapper[5014]: E1006 21:53:02.912665 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data podName:4b977fc8-6c11-41e6-9500-f0da2d66aea1 nodeName:}" failed. No retries permitted until 2025-10-06 21:53:03.912645734 +0000 UTC m=+1329.205682468 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data") pod "rabbitmq-server-0" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1") : configmap "rabbitmq-config-data" not found Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.923571 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sql4v\" (UniqueName: \"kubernetes.io/projected/6c301c8b-acb9-4008-9832-ce83dc524b6d-kube-api-access-sql4v\") pod \"placement8e33-account-delete-g7pdl\" (UID: \"6c301c8b-acb9-4008-9832-ce83dc524b6d\") " pod="openstack/placement8e33-account-delete-g7pdl" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.924399 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxgjk\" (UniqueName: \"kubernetes.io/projected/d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349-kube-api-access-nxgjk\") pod \"novaapi4b16-account-delete-jhqrl\" (UID: \"d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349\") " pod="openstack/novaapi4b16-account-delete-jhqrl" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.956076 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sql4v\" (UniqueName: \"kubernetes.io/projected/6c301c8b-acb9-4008-9832-ce83dc524b6d-kube-api-access-sql4v\") pod \"placement8e33-account-delete-g7pdl\" (UID: \"6c301c8b-acb9-4008-9832-ce83dc524b6d\") " pod="openstack/placement8e33-account-delete-g7pdl" Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.960127 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-62g9k"] Oct 06 21:53:02 crc kubenswrapper[5014]: I1006 21:53:02.969313 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxgjk\" (UniqueName: \"kubernetes.io/projected/d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349-kube-api-access-nxgjk\") pod \"novaapi4b16-account-delete-jhqrl\" (UID: \"d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349\") " pod="openstack/novaapi4b16-account-delete-jhqrl" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.010708 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-2qbdf"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.011116 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell1b5d1-account-delete-2bzts" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.034736 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-bhl92"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.050548 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-2qbdf"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.079003 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-bhl92"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.088548 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapi4b16-account-delete-jhqrl" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.159609 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement8e33-account-delete-g7pdl" Oct 06 21:53:03 crc kubenswrapper[5014]: E1006 21:53:03.208209 5014 secret.go:188] Couldn't get secret openstack/glance-scripts: secret "glance-scripts" not found Oct 06 21:53:03 crc kubenswrapper[5014]: E1006 21:53:03.208275 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-scripts podName:4a3bf50b-cb91-4201-affd-0c42d3585df2 nodeName:}" failed. No retries permitted until 2025-10-06 21:53:05.208259908 +0000 UTC m=+1330.501296642 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-scripts") pod "glance-default-external-api-0" (UID: "4a3bf50b-cb91-4201-affd-0c42d3585df2") : secret "glance-scripts" not found Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.292939 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-fwbdt" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovsdb-server" containerID="cri-o://52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" gracePeriod=29 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.327117 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-lcqfw"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.335598 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-fwbdt" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovs-vswitchd" containerID="cri-o://54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" gracePeriod=29 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.345507 5014 generic.go:334] "Generic (PLEG): container finished" podID="dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" containerID="d9b24ea6635bde477fa710678733d33fd5f1e17d35ee8b466a9fe4445920964c" exitCode=2 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.345562 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7","Type":"ContainerDied","Data":"d9b24ea6635bde477fa710678733d33fd5f1e17d35ee8b466a9fe4445920964c"} Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.356538 5014 generic.go:334] "Generic (PLEG): container finished" podID="74db136d-3445-4a7e-bcae-4645888ec806" containerID="f4279eb17667d99549974582ac2811d308ce6f778194f59eb6a9ca77feb9eca8" exitCode=0 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.356660 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f4vpp" event={"ID":"74db136d-3445-4a7e-bcae-4645888ec806","Type":"ContainerDied","Data":"f4279eb17667d99549974582ac2811d308ce6f778194f59eb6a9ca77feb9eca8"} Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.363420 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-lcqfw"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.374669 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-rf5xr"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.389855 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-rf5xr"] Oct 06 21:53:03 crc 
kubenswrapper[5014]: I1006 21:53:03.394803 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.395070 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" containerName="cinder-scheduler" containerID="cri-o://8874248b35b3044ef165fdc3593e8e2e389f8fc195b0aace5c1260fbf7aea6e6" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.395180 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" containerName="probe" containerID="cri-o://8e43bbec207ad90407cab0459eeaa37da10b5228534b3bcc64a9d48cab00a4fb" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.401533 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.401761 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="06e8dc30-8c95-4585-82e0-fc82de286a1c" containerName="cinder-api-log" containerID="cri-o://217fe11fe290a286d9693b3ce94387c3c5d799112f056294ef17d3e38a043e2a" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.402085 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="06e8dc30-8c95-4585-82e0-fc82de286a1c" containerName="cinder-api" containerID="cri-o://a4c0aa6211dfec0a5eda7b347326298f80376112ca717764da53c00f048df69b" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.408459 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.415312 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="1132a0d0-bc9b-430d-a89e-33455c763b3c" containerName="openstack-network-exporter" containerID="cri-o://c8aaf100576649e3e25587bb86a9bf5da8f2697e6e40a96847857214fba91a73" gracePeriod=300 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.419540 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.429148 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="c7d1001f-b56b-4d52-88bc-4f23831c3509" containerName="openstack-network-exporter" containerID="cri-o://19f11c3ccbb300a39fa2c1f9012ea3f2ed6574d0a166bcfbc5e489893f193296" gracePeriod=300 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.438791 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.439452 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-server" containerID="cri-o://6c9c5b52c3ece4980e0a040effefe3ca51032003d2dec58743e81e14eff2bd08" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.439582 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="swift-recon-cron" containerID="cri-o://794ec9c1ac30a70a53bd70e3b117f5a1d66ee9fd5b291b3742bca6c5ce899fad" gracePeriod=30 Oct 06 
21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.439670 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="rsync" containerID="cri-o://46a2145bb6c9090f1616d68d05a57ae9daf788137e9fef57af9e989e14a6f0ac" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.439716 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-expirer" containerID="cri-o://706435a61f829966a3dd81b83578483182dd823c97c44d790d748dae50b96405" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.439771 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-updater" containerID="cri-o://32fd41e8d0ac65e1df8af741b277995fce5511e9e71104405b7ef81473bcf2a0" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.439821 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-auditor" containerID="cri-o://8c6ee51bcb2aa47b868de1b54eca4d9f60363bb30fa19275e968859b3dd6b211" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.439867 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-replicator" containerID="cri-o://f82e937ea9ed292c5b6b2e0ed118bb02c000e888acec699801c0aef2ac147660" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.439924 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-server" containerID="cri-o://27a6ef41e95125c0eb0ca5c6f7e55fe49acb2d2f70f72c3b31eab3c0fb6cd5e9" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.439968 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="container-updater" containerID="cri-o://1f314e90fabf2871f624de1f83d91f5c7a575db546028192458bff82b490aaf1" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.440032 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="container-auditor" containerID="cri-o://2493907335e39e153ca0be098c83d4742c5eba72021fb33f5ee6878e68153c1e" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.440071 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="container-replicator" containerID="cri-o://7cac454e8ca5e7af86acbea681e349fae219a72762c7c9b2d920802f4e900488" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.440114 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="container-server" containerID="cri-o://d474f69bb53df946b7ca116226cc152b798653cd694c8d2e13e91ca35d97a083" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.440167 
5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-reaper" containerID="cri-o://cd46a133abb2d0da04ee1bdf5e939ea29e0a1f05c5d2709ffb6fe66c7656b025" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.440212 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-auditor" containerID="cri-o://9721677542abcd800ca6abd93a96aae1a47615326a5084a4d6243c91345c2fac" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.440257 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-replicator" containerID="cri-o://c89697b2f7e511e4d98946d9a1e15c87f63523c323ed01382b801bdcf2a5fd0e" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.487411 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-pwpps"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.496292 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-9zq5k_0d674559-08b5-41c9-8783-a5e42504fb3e/openstack-network-exporter/0.log" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.496366 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.542547 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a133d28-39e7-4768-83c0-9b59bef04241" path="/var/lib/kubelet/pods/2a133d28-39e7-4768-83c0-9b59bef04241/volumes" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.552286 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b292ee8-d31c-4e73-80e9-ccc915aeb406" path="/var/lib/kubelet/pods/2b292ee8-d31c-4e73-80e9-ccc915aeb406/volumes" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.552818 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="388c3436-9432-48f2-ab10-6741959aebf5" path="/var/lib/kubelet/pods/388c3436-9432-48f2-ab10-6741959aebf5/volumes" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.554915 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6386c486-dff3-4e2a-8312-d14c0b3ba0a5" path="/var/lib/kubelet/pods/6386c486-dff3-4e2a-8312-d14c0b3ba0a5/volumes" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.555513 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b198c4e1-6133-4729-b58a-c83946d45a5d" path="/var/lib/kubelet/pods/b198c4e1-6133-4729-b58a-c83946d45a5d/volumes" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.556177 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4ee6065-52e4-434d-a944-a56539092b3b" path="/var/lib/kubelet/pods/b4ee6065-52e4-434d-a944-a56539092b3b/volumes" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.572913 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8f0d160-17d7-4876-925a-93b29c26847a" path="/var/lib/kubelet/pods/b8f0d160-17d7-4876-925a-93b29c26847a/volumes" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.642405 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8fjs\" (UniqueName: 
\"kubernetes.io/projected/0d674559-08b5-41c9-8783-a5e42504fb3e-kube-api-access-h8fjs\") pod \"0d674559-08b5-41c9-8783-a5e42504fb3e\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.642454 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d674559-08b5-41c9-8783-a5e42504fb3e-metrics-certs-tls-certs\") pod \"0d674559-08b5-41c9-8783-a5e42504fb3e\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.642524 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d674559-08b5-41c9-8783-a5e42504fb3e-config\") pod \"0d674559-08b5-41c9-8783-a5e42504fb3e\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.642630 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/0d674559-08b5-41c9-8783-a5e42504fb3e-ovs-rundir\") pod \"0d674559-08b5-41c9-8783-a5e42504fb3e\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.642652 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0d674559-08b5-41c9-8783-a5e42504fb3e-ovn-rundir\") pod \"0d674559-08b5-41c9-8783-a5e42504fb3e\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.642677 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d674559-08b5-41c9-8783-a5e42504fb3e-combined-ca-bundle\") pod \"0d674559-08b5-41c9-8783-a5e42504fb3e\" (UID: \"0d674559-08b5-41c9-8783-a5e42504fb3e\") " Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.645555 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d674559-08b5-41c9-8783-a5e42504fb3e-config" (OuterVolumeSpecName: "config") pod "0d674559-08b5-41c9-8783-a5e42504fb3e" (UID: "0d674559-08b5-41c9-8783-a5e42504fb3e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.645572 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d674559-08b5-41c9-8783-a5e42504fb3e-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "0d674559-08b5-41c9-8783-a5e42504fb3e" (UID: "0d674559-08b5-41c9-8783-a5e42504fb3e"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.645600 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d674559-08b5-41c9-8783-a5e42504fb3e-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "0d674559-08b5-41c9-8783-a5e42504fb3e" (UID: "0d674559-08b5-41c9-8783-a5e42504fb3e"). InnerVolumeSpecName "ovn-rundir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:53:03 crc kubenswrapper[5014]: E1006 21:53:03.646418 5014 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Oct 06 21:53:03 crc kubenswrapper[5014]: E1006 21:53:03.646497 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data podName:c8f59d7d-f71b-46b0-bd32-476a2517a3b6 nodeName:}" failed. No retries permitted until 2025-10-06 21:53:05.646479491 +0000 UTC m=+1330.939516225 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data") pod "rabbitmq-cell1-server-0" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6") : configmap "rabbitmq-cell1-config-data" not found Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.647054 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d674559-08b5-41c9-8783-a5e42504fb3e-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.647077 5014 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/0d674559-08b5-41c9-8783-a5e42504fb3e-ovs-rundir\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.647087 5014 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/0d674559-08b5-41c9-8783-a5e42504fb3e-ovn-rundir\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.672797 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d674559-08b5-41c9-8783-a5e42504fb3e-kube-api-access-h8fjs" (OuterVolumeSpecName: "kube-api-access-h8fjs") pod "0d674559-08b5-41c9-8783-a5e42504fb3e" (UID: "0d674559-08b5-41c9-8783-a5e42504fb3e"). InnerVolumeSpecName "kube-api-access-h8fjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.703357 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="c7d1001f-b56b-4d52-88bc-4f23831c3509" containerName="ovsdbserver-nb" containerID="cri-o://9bdf32744d43a96f7be12f93ad6660d8bbb3d324702e5bee26e36c2cea2725ce" gracePeriod=300 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.720042 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d674559-08b5-41c9-8783-a5e42504fb3e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d674559-08b5-41c9-8783-a5e42504fb3e" (UID: "0d674559-08b5-41c9-8783-a5e42504fb3e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.746726 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="1132a0d0-bc9b-430d-a89e-33455c763b3c" containerName="ovsdbserver-sb" containerID="cri-o://a6e535773cd137a5bab0a6870ca512a9e234a397494ec79686683eaaade66b11" gracePeriod=300 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.756531 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d674559-08b5-41c9-8783-a5e42504fb3e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.756565 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8fjs\" (UniqueName: \"kubernetes.io/projected/0d674559-08b5-41c9-8783-a5e42504fb3e-kube-api-access-h8fjs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.808040 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-pwpps"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.808089 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5967cc9597-h6t4m"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.808111 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.808127 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.808146 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-f4ddcc578-kbrhw"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.809125 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-f4ddcc578-kbrhw" podUID="411ad591-8dad-46ef-8a44-88e86f5c86dd" containerName="placement-log" containerID="cri-o://9e1f60c39c5b159a972961d88da6cb37576b5e416b62ae8289bdf2915b1ab1f7" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.809283 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-f4ddcc578-kbrhw" podUID="411ad591-8dad-46ef-8a44-88e86f5c86dd" containerName="placement-api" containerID="cri-o://e8673788407b2a922e671f5a29648d8cec35c573c85be82d267e82064c4a8203" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.809673 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" podUID="ff29a0b3-1307-4fdb-bead-68d87f2f2923" containerName="dnsmasq-dns" containerID="cri-o://17be5865767feac1ef6592a1850f5a4352b1c7d266e2950f2d1dbfb81a9f252e" gracePeriod=10 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.809846 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4a3bf50b-cb91-4201-affd-0c42d3585df2" containerName="glance-log" containerID="cri-o://f69566e2159d9025363346b43e15495ad0dab925585a51aae0e3e6b46ac486f8" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.809998 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="daa5c33b-d941-4030-bf9f-cd6ed831986e" containerName="glance-log" containerID="cri-o://4563049f441233d9899b9510c09134c6b97cbbfee44d45e1869f367480e6495a" 
gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.810075 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4a3bf50b-cb91-4201-affd-0c42d3585df2" containerName="glance-httpd" containerID="cri-o://f799cb043ff7d806ae4f877f34b2084e03598231d0c40a3275884b945a348c9e" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.810152 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="daa5c33b-d941-4030-bf9f-cd6ed831986e" containerName="glance-httpd" containerID="cri-o://1b5ca8147b22af5bd68e4073c702853e918a3da61cc14255747ea6cd2310a427" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.931018 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-796f6ffb8f-4rjtg"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.931497 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" podUID="78a24140-d3a5-463a-aaf9-49857f14decc" containerName="proxy-httpd" containerID="cri-o://599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.931629 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" podUID="78a24140-d3a5-463a-aaf9-49857f14decc" containerName="proxy-server" containerID="cri-o://4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: E1006 21:53:03.970220 5014 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Oct 06 21:53:03 crc kubenswrapper[5014]: E1006 21:53:03.970280 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data podName:4b977fc8-6c11-41e6-9500-f0da2d66aea1 nodeName:}" failed. No retries permitted until 2025-10-06 21:53:05.970266352 +0000 UTC m=+1331.263303086 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data") pod "rabbitmq-server-0" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1") : configmap "rabbitmq-config-data" not found Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.989784 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.990002 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3c1e8f70-227b-40e0-aceb-470eed382180" containerName="nova-api-log" containerID="cri-o://1427a4912ecba9f84ea4a41b472f8f218b0c4df4bc3dd48139b02a15ecfe2a67" gracePeriod=30 Oct 06 21:53:03 crc kubenswrapper[5014]: I1006 21:53:03.990360 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3c1e8f70-227b-40e0-aceb-470eed382180" containerName="nova-api-api" containerID="cri-o://b619016e22f6883fb4aa46369b1d687e057d2e2ffd28c9ca34b55365fb9839c5" gracePeriod=30 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.020682 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.037317 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.037580 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="1dfeacf8-a072-4b44-bed9-618acd31fb6f" containerName="nova-metadata-log" containerID="cri-o://adb9b9301193619d4a64043aa363957a79778e3963acf98e99151a9c5d6dc68d" gracePeriod=30 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.038757 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="1dfeacf8-a072-4b44-bed9-618acd31fb6f" containerName="nova-metadata-metadata" containerID="cri-o://6d9c705385f5fdafb7775a2b08e388c98d6323f0cc8e8398d03748b74369b48a" gracePeriod=30 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.046173 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-6ht9f"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.078810 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-3af6-account-create-wt6wz"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.092554 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-6ht9f"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.110750 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d674559-08b5-41c9-8783-a5e42504fb3e-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "0d674559-08b5-41c9-8783-a5e42504fb3e" (UID: "0d674559-08b5-41c9-8783-a5e42504fb3e"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.118348 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder3af6-account-delete-44b4q"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.141871 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-3af6-account-create-wt6wz"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.173134 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.183250 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="c8f59d7d-f71b-46b0-bd32-476a2517a3b6" containerName="rabbitmq" containerID="cri-o://eb4d1ac3e92d3cfcfc09e4936b90190475ab22e4925f8ce4f363a59470abfbe5" gracePeriod=604800 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.185022 5014 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d674559-08b5-41c9-8783-a5e42504fb3e-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.199475 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-2txhf"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.210665 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-2txhf"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.219791 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-6d2b-account-create-dn55l"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.235913 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-6d2b-account-create-dn55l"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.253296 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-f4vpp" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.259549 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance6d2b-account-delete-n47pm"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.272319 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-f6fff5c8f-xbgr9"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.278048 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-f6fff5c8f-xbgr9" podUID="7eeb278b-517f-4b26-825e-12d7d0d969ce" containerName="neutron-api" containerID="cri-o://7313b7e6181c156a532c5a8a8da1000e022b6207ee08165f639a58cf73fc9b6a" gracePeriod=30 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.278427 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-f6fff5c8f-xbgr9" podUID="7eeb278b-517f-4b26-825e-12d7d0d969ce" containerName="neutron-httpd" containerID="cri-o://48a0f810064b04d6c7a9053d863e731b5f0654d50ec97630c69d7ccb6c16e34f" gracePeriod=30 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.286346 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hl7qk\" (UniqueName: \"kubernetes.io/projected/74db136d-3445-4a7e-bcae-4645888ec806-kube-api-access-hl7qk\") pod \"74db136d-3445-4a7e-bcae-4645888ec806\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.286405 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/74db136d-3445-4a7e-bcae-4645888ec806-ovn-controller-tls-certs\") pod \"74db136d-3445-4a7e-bcae-4645888ec806\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.286453 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-run\") pod \"74db136d-3445-4a7e-bcae-4645888ec806\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.286518 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-log-ovn\") pod \"74db136d-3445-4a7e-bcae-4645888ec806\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.286651 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/74db136d-3445-4a7e-bcae-4645888ec806-scripts\") pod \"74db136d-3445-4a7e-bcae-4645888ec806\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.286667 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-run-ovn\") pod \"74db136d-3445-4a7e-bcae-4645888ec806\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.286743 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74db136d-3445-4a7e-bcae-4645888ec806-combined-ca-bundle\") pod \"74db136d-3445-4a7e-bcae-4645888ec806\" (UID: 
\"74db136d-3445-4a7e-bcae-4645888ec806\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.288811 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-run" (OuterVolumeSpecName: "var-run") pod "74db136d-3445-4a7e-bcae-4645888ec806" (UID: "74db136d-3445-4a7e-bcae-4645888ec806"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.292133 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74db136d-3445-4a7e-bcae-4645888ec806-scripts" (OuterVolumeSpecName: "scripts") pod "74db136d-3445-4a7e-bcae-4645888ec806" (UID: "74db136d-3445-4a7e-bcae-4645888ec806"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.292192 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "74db136d-3445-4a7e-bcae-4645888ec806" (UID: "74db136d-3445-4a7e-bcae-4645888ec806"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.292206 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "74db136d-3445-4a7e-bcae-4645888ec806" (UID: "74db136d-3445-4a7e-bcae-4645888ec806"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.293948 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74db136d-3445-4a7e-bcae-4645888ec806-kube-api-access-hl7qk" (OuterVolumeSpecName: "kube-api-access-hl7qk") pod "74db136d-3445-4a7e-bcae-4645888ec806" (UID: "74db136d-3445-4a7e-bcae-4645888ec806"). InnerVolumeSpecName "kube-api-access-hl7qk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.335995 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-s9ksw"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.351663 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-s9ksw"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.374187 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbicanb6a2-account-delete-c6b5p"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.388064 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="3b18812d-9eec-4254-8633-b40f55244e47" containerName="galera" containerID="cri-o://1ca1df3e861d5336e4515ca03c187e532ee4429553cf2f9930ba7e1d9925c254" gracePeriod=30 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.394297 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-b6a2-account-create-6h8mw"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.400172 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74db136d-3445-4a7e-bcae-4645888ec806-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "74db136d-3445-4a7e-bcae-4645888ec806" (UID: "74db136d-3445-4a7e-bcae-4645888ec806"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.408699 5014 generic.go:334] "Generic (PLEG): container finished" podID="411ad591-8dad-46ef-8a44-88e86f5c86dd" containerID="9e1f60c39c5b159a972961d88da6cb37576b5e416b62ae8289bdf2915b1ab1f7" exitCode=143 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.408797 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f4ddcc578-kbrhw" event={"ID":"411ad591-8dad-46ef-8a44-88e86f5c86dd","Type":"ContainerDied","Data":"9e1f60c39c5b159a972961d88da6cb37576b5e416b62ae8289bdf2915b1ab1f7"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.409726 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74db136d-3445-4a7e-bcae-4645888ec806-combined-ca-bundle\") pod \"74db136d-3445-4a7e-bcae-4645888ec806\" (UID: \"74db136d-3445-4a7e-bcae-4645888ec806\") " Oct 06 21:53:04 crc kubenswrapper[5014]: W1006 21:53:04.409978 5014 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/74db136d-3445-4a7e-bcae-4645888ec806/volumes/kubernetes.io~secret/combined-ca-bundle Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.410004 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74db136d-3445-4a7e-bcae-4645888ec806-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "74db136d-3445-4a7e-bcae-4645888ec806" (UID: "74db136d-3445-4a7e-bcae-4645888ec806"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.416684 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74db136d-3445-4a7e-bcae-4645888ec806-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "74db136d-3445-4a7e-bcae-4645888ec806" (UID: "74db136d-3445-4a7e-bcae-4645888ec806"). InnerVolumeSpecName "ovn-controller-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.418433 5014 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-log-ovn\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.418469 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/74db136d-3445-4a7e-bcae-4645888ec806-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.418488 5014 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-run-ovn\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.418500 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74db136d-3445-4a7e-bcae-4645888ec806-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.418516 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hl7qk\" (UniqueName: \"kubernetes.io/projected/74db136d-3445-4a7e-bcae-4645888ec806-kube-api-access-hl7qk\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.418528 5014 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/74db136d-3445-4a7e-bcae-4645888ec806-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.418544 5014 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/74db136d-3445-4a7e-bcae-4645888ec806-var-run\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.430890 5014 generic.go:334] "Generic (PLEG): container finished" podID="3c1e8f70-227b-40e0-aceb-470eed382180" containerID="1427a4912ecba9f84ea4a41b472f8f218b0c4df4bc3dd48139b02a15ecfe2a67" exitCode=143 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.430998 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3c1e8f70-227b-40e0-aceb-470eed382180","Type":"ContainerDied","Data":"1427a4912ecba9f84ea4a41b472f8f218b0c4df4bc3dd48139b02a15ecfe2a67"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.445364 5014 generic.go:334] "Generic (PLEG): container finished" podID="daa5c33b-d941-4030-bf9f-cd6ed831986e" containerID="4563049f441233d9899b9510c09134c6b97cbbfee44d45e1869f367480e6495a" exitCode=143 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.447084 5014 generic.go:334] "Generic (PLEG): container finished" podID="06e8dc30-8c95-4585-82e0-fc82de286a1c" containerID="217fe11fe290a286d9693b3ce94387c3c5d799112f056294ef17d3e38a043e2a" exitCode=143 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.458020 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-ptmjb"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.458066 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"daa5c33b-d941-4030-bf9f-cd6ed831986e","Type":"ContainerDied","Data":"4563049f441233d9899b9510c09134c6b97cbbfee44d45e1869f367480e6495a"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.458092 5014 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"06e8dc30-8c95-4585-82e0-fc82de286a1c","Type":"ContainerDied","Data":"217fe11fe290a286d9693b3ce94387c3c5d799112f056294ef17d3e38a043e2a"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.465661 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-b6a2-account-create-6h8mw"] Oct 06 21:53:04 crc kubenswrapper[5014]: E1006 21:53:04.480862 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a6e535773cd137a5bab0a6870ca512a9e234a397494ec79686683eaaade66b11 is running failed: container process not found" containerID="a6e535773cd137a5bab0a6870ca512a9e234a397494ec79686683eaaade66b11" cmd=["/usr/bin/pidof","ovsdb-server"] Oct 06 21:53:04 crc kubenswrapper[5014]: E1006 21:53:04.484667 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a6e535773cd137a5bab0a6870ca512a9e234a397494ec79686683eaaade66b11 is running failed: container process not found" containerID="a6e535773cd137a5bab0a6870ca512a9e234a397494ec79686683eaaade66b11" cmd=["/usr/bin/pidof","ovsdb-server"] Oct 06 21:53:04 crc kubenswrapper[5014]: E1006 21:53:04.495763 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a6e535773cd137a5bab0a6870ca512a9e234a397494ec79686683eaaade66b11 is running failed: container process not found" containerID="a6e535773cd137a5bab0a6870ca512a9e234a397494ec79686683eaaade66b11" cmd=["/usr/bin/pidof","ovsdb-server"] Oct 06 21:53:04 crc kubenswrapper[5014]: E1006 21:53:04.495915 5014 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a6e535773cd137a5bab0a6870ca512a9e234a397494ec79686683eaaade66b11 is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-sb-0" podUID="1132a0d0-bc9b-430d-a89e-33455c763b3c" containerName="ovsdbserver-sb" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.508364 5014 generic.go:334] "Generic (PLEG): container finished" podID="05220712-c8ae-4ac8-9c49-d74770367b33" containerID="46a2145bb6c9090f1616d68d05a57ae9daf788137e9fef57af9e989e14a6f0ac" exitCode=0 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.508394 5014 generic.go:334] "Generic (PLEG): container finished" podID="05220712-c8ae-4ac8-9c49-d74770367b33" containerID="706435a61f829966a3dd81b83578483182dd823c97c44d790d748dae50b96405" exitCode=0 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.508401 5014 generic.go:334] "Generic (PLEG): container finished" podID="05220712-c8ae-4ac8-9c49-d74770367b33" containerID="32fd41e8d0ac65e1df8af741b277995fce5511e9e71104405b7ef81473bcf2a0" exitCode=0 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.508407 5014 generic.go:334] "Generic (PLEG): container finished" podID="05220712-c8ae-4ac8-9c49-d74770367b33" containerID="8c6ee51bcb2aa47b868de1b54eca4d9f60363bb30fa19275e968859b3dd6b211" exitCode=0 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.508840 5014 generic.go:334] "Generic (PLEG): container finished" podID="05220712-c8ae-4ac8-9c49-d74770367b33" containerID="f82e937ea9ed292c5b6b2e0ed118bb02c000e888acec699801c0aef2ac147660" exitCode=0 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.508856 5014 generic.go:334] "Generic (PLEG): container 
finished" podID="05220712-c8ae-4ac8-9c49-d74770367b33" containerID="27a6ef41e95125c0eb0ca5c6f7e55fe49acb2d2f70f72c3b31eab3c0fb6cd5e9" exitCode=0 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.508979 5014 generic.go:334] "Generic (PLEG): container finished" podID="05220712-c8ae-4ac8-9c49-d74770367b33" containerID="1f314e90fabf2871f624de1f83d91f5c7a575db546028192458bff82b490aaf1" exitCode=0 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.508992 5014 generic.go:334] "Generic (PLEG): container finished" podID="05220712-c8ae-4ac8-9c49-d74770367b33" containerID="2493907335e39e153ca0be098c83d4742c5eba72021fb33f5ee6878e68153c1e" exitCode=0 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.508998 5014 generic.go:334] "Generic (PLEG): container finished" podID="05220712-c8ae-4ac8-9c49-d74770367b33" containerID="7cac454e8ca5e7af86acbea681e349fae219a72762c7c9b2d920802f4e900488" exitCode=0 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509004 5014 generic.go:334] "Generic (PLEG): container finished" podID="05220712-c8ae-4ac8-9c49-d74770367b33" containerID="cd46a133abb2d0da04ee1bdf5e939ea29e0a1f05c5d2709ffb6fe66c7656b025" exitCode=0 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509010 5014 generic.go:334] "Generic (PLEG): container finished" podID="05220712-c8ae-4ac8-9c49-d74770367b33" containerID="9721677542abcd800ca6abd93a96aae1a47615326a5084a4d6243c91345c2fac" exitCode=0 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509018 5014 generic.go:334] "Generic (PLEG): container finished" podID="05220712-c8ae-4ac8-9c49-d74770367b33" containerID="c89697b2f7e511e4d98946d9a1e15c87f63523c323ed01382b801bdcf2a5fd0e" exitCode=0 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509229 5014 generic.go:334] "Generic (PLEG): container finished" podID="05220712-c8ae-4ac8-9c49-d74770367b33" containerID="6c9c5b52c3ece4980e0a040effefe3ca51032003d2dec58743e81e14eff2bd08" exitCode=0 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509341 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"46a2145bb6c9090f1616d68d05a57ae9daf788137e9fef57af9e989e14a6f0ac"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509451 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"706435a61f829966a3dd81b83578483182dd823c97c44d790d748dae50b96405"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509550 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"32fd41e8d0ac65e1df8af741b277995fce5511e9e71104405b7ef81473bcf2a0"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509566 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"8c6ee51bcb2aa47b868de1b54eca4d9f60363bb30fa19275e968859b3dd6b211"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509574 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"f82e937ea9ed292c5b6b2e0ed118bb02c000e888acec699801c0aef2ac147660"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509582 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"27a6ef41e95125c0eb0ca5c6f7e55fe49acb2d2f70f72c3b31eab3c0fb6cd5e9"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509590 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"1f314e90fabf2871f624de1f83d91f5c7a575db546028192458bff82b490aaf1"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509597 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"2493907335e39e153ca0be098c83d4742c5eba72021fb33f5ee6878e68153c1e"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509605 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"7cac454e8ca5e7af86acbea681e349fae219a72762c7c9b2d920802f4e900488"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509627 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"cd46a133abb2d0da04ee1bdf5e939ea29e0a1f05c5d2709ffb6fe66c7656b025"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509636 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"9721677542abcd800ca6abd93a96aae1a47615326a5084a4d6243c91345c2fac"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509644 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"c89697b2f7e511e4d98946d9a1e15c87f63523c323ed01382b801bdcf2a5fd0e"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.509652 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"6c9c5b52c3ece4980e0a040effefe3ca51032003d2dec58743e81e14eff2bd08"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.511744 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-ptmjb"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.512873 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-f4vpp" event={"ID":"74db136d-3445-4a7e-bcae-4645888ec806","Type":"ContainerDied","Data":"c04b471884820c8a60d36e31f1b5f50ec48bb852b1521f331933734ff789c995"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.512903 5014 scope.go:117] "RemoveContainer" containerID="f4279eb17667d99549974582ac2811d308ce6f778194f59eb6a9ca77feb9eca8" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.513024 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-f4vpp" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.520639 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-ce1b-account-create-cjdcf"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.533659 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-ce1b-account-create-cjdcf"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.534965 5014 generic.go:334] "Generic (PLEG): container finished" podID="db1ef19c-c5bd-4bf1-a58c-73ff198caaa9" containerID="085db7e17be44d9a79cea2381416abb7231f57828c4dbcfdbdfc446dac2fb6f0" exitCode=137 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.537189 5014 generic.go:334] "Generic (PLEG): container finished" podID="1dfeacf8-a072-4b44-bed9-618acd31fb6f" containerID="adb9b9301193619d4a64043aa363957a79778e3963acf98e99151a9c5d6dc68d" exitCode=143 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.537278 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1dfeacf8-a072-4b44-bed9-618acd31fb6f","Type":"ContainerDied","Data":"adb9b9301193619d4a64043aa363957a79778e3963acf98e99151a9c5d6dc68d"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.541419 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0ce1b-account-delete-p9nfs"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.553917 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-9zq5k_0d674559-08b5-41c9-8783-a5e42504fb3e/openstack-network-exporter/0.log" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.554167 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-9zq5k" event={"ID":"0d674559-08b5-41c9-8783-a5e42504fb3e","Type":"ContainerDied","Data":"460eba36cd0f4513831dd10fbdaac25af83e5140d9c623eb9e0285331e98f5be"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.554334 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-9zq5k" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.562665 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-b5d1-account-create-sl9xc"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.565960 5014 scope.go:117] "RemoveContainer" containerID="4548df78c36932efb3465251204787ed31111710ed70b8eb0c470d23a627a6c7" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.568809 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_1132a0d0-bc9b-430d-a89e-33455c763b3c/ovsdbserver-sb/0.log" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.568847 5014 generic.go:334] "Generic (PLEG): container finished" podID="1132a0d0-bc9b-430d-a89e-33455c763b3c" containerID="c8aaf100576649e3e25587bb86a9bf5da8f2697e6e40a96847857214fba91a73" exitCode=2 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.568863 5014 generic.go:334] "Generic (PLEG): container finished" podID="1132a0d0-bc9b-430d-a89e-33455c763b3c" containerID="a6e535773cd137a5bab0a6870ca512a9e234a397494ec79686683eaaade66b11" exitCode=143 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.568912 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1132a0d0-bc9b-430d-a89e-33455c763b3c","Type":"ContainerDied","Data":"c8aaf100576649e3e25587bb86a9bf5da8f2697e6e40a96847857214fba91a73"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.568939 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1132a0d0-bc9b-430d-a89e-33455c763b3c","Type":"ContainerDied","Data":"a6e535773cd137a5bab0a6870ca512a9e234a397494ec79686683eaaade66b11"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.605232 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c7d1001f-b56b-4d52-88bc-4f23831c3509/ovsdbserver-nb/0.log" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.605280 5014 generic.go:334] "Generic (PLEG): container finished" podID="c7d1001f-b56b-4d52-88bc-4f23831c3509" containerID="19f11c3ccbb300a39fa2c1f9012ea3f2ed6574d0a166bcfbc5e489893f193296" exitCode=2 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.605297 5014 generic.go:334] "Generic (PLEG): container finished" podID="c7d1001f-b56b-4d52-88bc-4f23831c3509" containerID="9bdf32744d43a96f7be12f93ad6660d8bbb3d324702e5bee26e36c2cea2725ce" exitCode=143 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.605398 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c7d1001f-b56b-4d52-88bc-4f23831c3509","Type":"ContainerDied","Data":"19f11c3ccbb300a39fa2c1f9012ea3f2ed6574d0a166bcfbc5e489893f193296"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.605429 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c7d1001f-b56b-4d52-88bc-4f23831c3509","Type":"ContainerDied","Data":"9bdf32744d43a96f7be12f93ad6660d8bbb3d324702e5bee26e36c2cea2725ce"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.612379 5014 generic.go:334] "Generic (PLEG): container finished" podID="ff29a0b3-1307-4fdb-bead-68d87f2f2923" containerID="17be5865767feac1ef6592a1850f5a4352b1c7d266e2950f2d1dbfb81a9f252e" exitCode=0 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.612457 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" 
event={"ID":"ff29a0b3-1307-4fdb-bead-68d87f2f2923","Type":"ContainerDied","Data":"17be5865767feac1ef6592a1850f5a4352b1c7d266e2950f2d1dbfb81a9f252e"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.614527 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-b5d1-account-create-sl9xc"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.623695 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-wmjhx"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.635989 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-wmjhx"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.640681 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell1b5d1-account-delete-2bzts"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.647847 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-55rwp"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.651587 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.652344 5014 generic.go:334] "Generic (PLEG): container finished" podID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" exitCode=0 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.652388 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-fwbdt" event={"ID":"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345","Type":"ContainerDied","Data":"52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.654809 5014 generic.go:334] "Generic (PLEG): container finished" podID="4a3bf50b-cb91-4201-affd-0c42d3585df2" containerID="f69566e2159d9025363346b43e15495ad0dab925585a51aae0e3e6b46ac486f8" exitCode=143 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.654845 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4a3bf50b-cb91-4201-affd-0c42d3585df2","Type":"ContainerDied","Data":"f69566e2159d9025363346b43e15495ad0dab925585a51aae0e3e6b46ac486f8"} Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.656790 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-697f765b44-77s6g"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.656988 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-697f765b44-77s6g" podUID="13fe806f-0e4e-4ea7-838b-938c5fe74c99" containerName="barbican-keystone-listener-log" containerID="cri-o://7c61f00177c33532524b8163faef3133d7c88a0c6edeaf8d14eb4a6022f7abdd" gracePeriod=30 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.657357 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-697f765b44-77s6g" podUID="13fe806f-0e4e-4ea7-838b-938c5fe74c99" containerName="barbican-keystone-listener" containerID="cri-o://dec3beff6abbdcf32f4d602873fe4ab229755aef532b1c3308ed69b08438e50e" gracePeriod=30 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.680546 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-4b16-account-create-qrrfb"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.705936 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/nova-api-db-create-55rwp"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.726931 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-openstack-config-secret\") pod \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.727125 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-openstack-config\") pod \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.727288 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-combined-ca-bundle\") pod \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.727433 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gpslr\" (UniqueName: \"kubernetes.io/projected/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-kube-api-access-gpslr\") pod \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\" (UID: \"db1ef19c-c5bd-4bf1-a58c-73ff198caaa9\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.731970 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-4b16-account-create-qrrfb"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.738814 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-kube-api-access-gpslr" (OuterVolumeSpecName: "kube-api-access-gpslr") pod "db1ef19c-c5bd-4bf1-a58c-73ff198caaa9" (UID: "db1ef19c-c5bd-4bf1-a58c-73ff198caaa9"). InnerVolumeSpecName "kube-api-access-gpslr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.766494 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapi4b16-account-delete-jhqrl"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.791373 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-p5kln"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.807548 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_1132a0d0-bc9b-430d-a89e-33455c763b3c/ovsdbserver-sb/0.log" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.807739 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.835583 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gpslr\" (UniqueName: \"kubernetes.io/projected/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-kube-api-access-gpslr\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.838389 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c7d1001f-b56b-4d52-88bc-4f23831c3509/ovsdbserver-nb/0.log" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.838494 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.843502 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-p5kln"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.851848 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-2e7c-account-create-g9chd"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.859947 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-2e7c-account-create-g9chd"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.870714 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-gc874"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.872358 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.877322 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-gc874"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.885693 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.885997 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="49c2ecb8-63d7-4275-97ff-7aa899707212" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://a6b7e7b2fd6d1afbe67e44ec51642bedfaa579e1f6be92a11c811ecf98a44b29" gracePeriod=30 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.901484 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement8e33-account-delete-g7pdl"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.905381 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "db1ef19c-c5bd-4bf1-a58c-73ff198caaa9" (UID: "db1ef19c-c5bd-4bf1-a58c-73ff198caaa9"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.912821 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-849cf44bc5-9qnb4"] Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.913318 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-849cf44bc5-9qnb4" podUID="56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" containerName="barbican-worker-log" containerID="cri-o://cc9f5a4202ce6f9f16ab8d8453f50c25c26d08e6f673de106491b3f0b64a0984" gracePeriod=30 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.913373 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-849cf44bc5-9qnb4" podUID="56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" containerName="barbican-worker" containerID="cri-o://6e30f54354b4e05d72cd38208e8b8588c2363ab84617b170457752ebb404a286" gracePeriod=30 Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.937453 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1132a0d0-bc9b-430d-a89e-33455c763b3c-scripts\") pod \"1132a0d0-bc9b-430d-a89e-33455c763b3c\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.937972 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"1132a0d0-bc9b-430d-a89e-33455c763b3c\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.938123 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-ovsdbserver-sb\") pod \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.938254 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-dns-svc\") pod \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.938384 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7d1001f-b56b-4d52-88bc-4f23831c3509-config\") pod \"c7d1001f-b56b-4d52-88bc-4f23831c3509\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.940431 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1132a0d0-bc9b-430d-a89e-33455c763b3c-config\") pod \"1132a0d0-bc9b-430d-a89e-33455c763b3c\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.940592 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-ovsdbserver-nb\") pod \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.940770 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/1132a0d0-bc9b-430d-a89e-33455c763b3c-ovsdb-rundir\") pod \"1132a0d0-bc9b-430d-a89e-33455c763b3c\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.940915 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"c7d1001f-b56b-4d52-88bc-4f23831c3509\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.941164 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6d994\" (UniqueName: \"kubernetes.io/projected/1132a0d0-bc9b-430d-a89e-33455c763b3c-kube-api-access-6d994\") pod \"1132a0d0-bc9b-430d-a89e-33455c763b3c\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.939383 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1132a0d0-bc9b-430d-a89e-33455c763b3c-scripts" (OuterVolumeSpecName: "scripts") pod "1132a0d0-bc9b-430d-a89e-33455c763b3c" (UID: "1132a0d0-bc9b-430d-a89e-33455c763b3c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.941365 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-dns-swift-storage-0\") pod \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.942535 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-combined-ca-bundle\") pod \"c7d1001f-b56b-4d52-88bc-4f23831c3509\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.942603 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-combined-ca-bundle\") pod \"1132a0d0-bc9b-430d-a89e-33455c763b3c\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.942657 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-metrics-certs-tls-certs\") pod \"c7d1001f-b56b-4d52-88bc-4f23831c3509\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.942700 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lwwxv\" (UniqueName: \"kubernetes.io/projected/ff29a0b3-1307-4fdb-bead-68d87f2f2923-kube-api-access-lwwxv\") pod \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.942731 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-ovsdbserver-nb-tls-certs\") pod \"c7d1001f-b56b-4d52-88bc-4f23831c3509\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") " Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.942751 5014 
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.942777 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c7d1001f-b56b-4d52-88bc-4f23831c3509-ovsdb-rundir\") pod \"c7d1001f-b56b-4d52-88bc-4f23831c3509\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") "
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.942817 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c7d1001f-b56b-4d52-88bc-4f23831c3509-scripts\") pod \"c7d1001f-b56b-4d52-88bc-4f23831c3509\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") "
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.942833 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-ovsdbserver-sb-tls-certs\") pod \"1132a0d0-bc9b-430d-a89e-33455c763b3c\" (UID: \"1132a0d0-bc9b-430d-a89e-33455c763b3c\") "
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.942892 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2cw7b\" (UniqueName: \"kubernetes.io/projected/c7d1001f-b56b-4d52-88bc-4f23831c3509-kube-api-access-2cw7b\") pod \"c7d1001f-b56b-4d52-88bc-4f23831c3509\" (UID: \"c7d1001f-b56b-4d52-88bc-4f23831c3509\") "
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.940049 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7d1001f-b56b-4d52-88bc-4f23831c3509-config" (OuterVolumeSpecName: "config") pod "c7d1001f-b56b-4d52-88bc-4f23831c3509" (UID: "c7d1001f-b56b-4d52-88bc-4f23831c3509"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.942779 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "1132a0d0-bc9b-430d-a89e-33455c763b3c" (UID: "1132a0d0-bc9b-430d-a89e-33455c763b3c"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.942917 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-config\") pod \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\" (UID: \"ff29a0b3-1307-4fdb-bead-68d87f2f2923\") "
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.943188 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1132a0d0-bc9b-430d-a89e-33455c763b3c-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "1132a0d0-bc9b-430d-a89e-33455c763b3c" (UID: "1132a0d0-bc9b-430d-a89e-33455c763b3c"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.944470 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1132a0d0-bc9b-430d-a89e-33455c763b3c-config" (OuterVolumeSpecName: "config") pod "1132a0d0-bc9b-430d-a89e-33455c763b3c" (UID: "1132a0d0-bc9b-430d-a89e-33455c763b3c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.946251 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7d1001f-b56b-4d52-88bc-4f23831c3509-scripts" (OuterVolumeSpecName: "scripts") pod "c7d1001f-b56b-4d52-88bc-4f23831c3509" (UID: "c7d1001f-b56b-4d52-88bc-4f23831c3509"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.946409 5014 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-openstack-config\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.946440 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1132a0d0-bc9b-430d-a89e-33455c763b3c-scripts\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.946502 5014 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" "
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.946515 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7d1001f-b56b-4d52-88bc-4f23831c3509-config\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.946527 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1132a0d0-bc9b-430d-a89e-33455c763b3c-config\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.948264 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1132a0d0-bc9b-430d-a89e-33455c763b3c-ovsdb-rundir\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.957269 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1132a0d0-bc9b-430d-a89e-33455c763b3c-kube-api-access-6d994" (OuterVolumeSpecName: "kube-api-access-6d994") pod "1132a0d0-bc9b-430d-a89e-33455c763b3c" (UID: "1132a0d0-bc9b-430d-a89e-33455c763b3c"). InnerVolumeSpecName "kube-api-access-6d994". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.959354 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7d1001f-b56b-4d52-88bc-4f23831c3509-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "c7d1001f-b56b-4d52-88bc-4f23831c3509" (UID: "c7d1001f-b56b-4d52-88bc-4f23831c3509"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.961124 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "c7d1001f-b56b-4d52-88bc-4f23831c3509" (UID: "c7d1001f-b56b-4d52-88bc-4f23831c3509"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.982184 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff29a0b3-1307-4fdb-bead-68d87f2f2923-kube-api-access-lwwxv" (OuterVolumeSpecName: "kube-api-access-lwwxv") pod "ff29a0b3-1307-4fdb-bead-68d87f2f2923" (UID: "ff29a0b3-1307-4fdb-bead-68d87f2f2923"). InnerVolumeSpecName "kube-api-access-lwwxv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.989964 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-8e33-account-create-jrgt5"]
Oct 06 21:53:04 crc kubenswrapper[5014]: W1006 21:53:04.991860 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6ca96007_c66c_4e95_84b7_12eff893cca2.slice/crio-74251ca83d284fe7649fba35fd5fd4027dc8e3058303e83b341178b0c7e22ecb WatchSource:0}: Error finding container 74251ca83d284fe7649fba35fd5fd4027dc8e3058303e83b341178b0c7e22ecb: Status 404 returned error can't find the container with id 74251ca83d284fe7649fba35fd5fd4027dc8e3058303e83b341178b0c7e22ecb
Oct 06 21:53:04 crc kubenswrapper[5014]: I1006 21:53:04.997682 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7d1001f-b56b-4d52-88bc-4f23831c3509-kube-api-access-2cw7b" (OuterVolumeSpecName: "kube-api-access-2cw7b") pod "c7d1001f-b56b-4d52-88bc-4f23831c3509" (UID: "c7d1001f-b56b-4d52-88bc-4f23831c3509"). InnerVolumeSpecName "kube-api-access-2cw7b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.005780 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "db1ef19c-c5bd-4bf1-a58c-73ff198caaa9" (UID: "db1ef19c-c5bd-4bf1-a58c-73ff198caaa9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:53:05 crc kubenswrapper[5014]: W1006 21:53:05.006390 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc0ab5ea1_2549_4d17_8758_bfe2ad6e4b1c.slice/crio-f6f375e6a5f35c31f2cc6ba298a50d7c2005606af2f1ec61b80bf43d94a71e13 WatchSource:0}: Error finding container f6f375e6a5f35c31f2cc6ba298a50d7c2005606af2f1ec61b80bf43d94a71e13: Status 404 returned error can't find the container with id f6f375e6a5f35c31f2cc6ba298a50d7c2005606af2f1ec61b80bf43d94a71e13
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.044984 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-598975567d-rtcs4"]
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.045229 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-598975567d-rtcs4" podUID="0bd07ab2-973f-4531-8e5f-68d349e231b4" containerName="barbican-api-log" containerID="cri-o://8f5fff8a69f244b622369e3d3222e772ceb03080fc79e98a2126892aa6220818" gracePeriod=30
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.045664 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-598975567d-rtcs4" podUID="0bd07ab2-973f-4531-8e5f-68d349e231b4" containerName="barbican-api" containerID="cri-o://179498d178eaf12fad9776b3020c6b558f9bd713b5bcdcb7b15c957545542e01" gracePeriod=30
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.047295 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "db1ef19c-c5bd-4bf1-a58c-73ff198caaa9" (UID: "db1ef19c-c5bd-4bf1-a58c-73ff198caaa9"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.049928 5014 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" "
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.049969 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.049984 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6d994\" (UniqueName: \"kubernetes.io/projected/1132a0d0-bc9b-430d-a89e-33455c763b3c-kube-api-access-6d994\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.049995 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lwwxv\" (UniqueName: \"kubernetes.io/projected/ff29a0b3-1307-4fdb-bead-68d87f2f2923-kube-api-access-lwwxv\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.050007 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c7d1001f-b56b-4d52-88bc-4f23831c3509-ovsdb-rundir\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.050019 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c7d1001f-b56b-4d52-88bc-4f23831c3509-scripts\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.050028 5014 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.050036 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2cw7b\" (UniqueName: \"kubernetes.io/projected/c7d1001f-b56b-4d52-88bc-4f23831c3509-kube-api-access-2cw7b\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.068735 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-wnhrf"]
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.100960 5014 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc"
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.109538 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-8e33-account-create-jrgt5"]
Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.147870 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a5aaa8e6da3bb1475013fcf9505c94fb4f157784a202b67b1fafdd6706d374db" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.148028 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-wnhrf"]
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.152129 5014 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.173754 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-z8mlq"]
Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.174504 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a5aaa8e6da3bb1475013fcf9505c94fb4f157784a202b67b1fafdd6706d374db" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.174654 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1132a0d0-bc9b-430d-a89e-33455c763b3c" (UID: "1132a0d0-bc9b-430d-a89e-33455c763b3c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.185858 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.186092 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="7de90fe7-747a-4334-be2a-d3b5ee6b8148" containerName="nova-cell1-conductor-conductor" containerID="cri-o://3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370" gracePeriod=30
Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.200092 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a5aaa8e6da3bb1475013fcf9505c94fb4f157784a202b67b1fafdd6706d374db" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.200387 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" containerName="ovn-northd"
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.216794 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c7d1001f-b56b-4d52-88bc-4f23831c3509" (UID: "c7d1001f-b56b-4d52-88bc-4f23831c3509"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.238880 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-z8mlq"]
Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.239793 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.250035 5014 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc"
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.255371 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.255405 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.255422 5014 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.255573 5014 secret.go:188] Couldn't get secret openstack/glance-scripts: secret "glance-scripts" not found
Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.255655 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-scripts podName:4a3bf50b-cb91-4201-affd-0c42d3585df2 nodeName:}" failed. No retries permitted until 2025-10-06 21:53:09.255635925 +0000 UTC m=+1334.548672669 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-scripts") pod "glance-default-external-api-0" (UID: "4a3bf50b-cb91-4201-affd-0c42d3585df2") : secret "glance-scripts" not found
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.265365 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.265552 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="6ebf215b-a88f-4b08-8f2e-58284b7d4548" containerName="nova-cell0-conductor-conductor" containerID="cri-o://e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee" gracePeriod=30
Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.268452 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ff29a0b3-1307-4fdb-bead-68d87f2f2923" (UID: "ff29a0b3-1307-4fdb-bead-68d87f2f2923"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.279843 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.284301 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.284380 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="7de90fe7-747a-4334-be2a-d3b5ee6b8148" containerName="nova-cell1-conductor-conductor" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.290571 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ff29a0b3-1307-4fdb-bead-68d87f2f2923" (UID: "ff29a0b3-1307-4fdb-bead-68d87f2f2923"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.290660 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.294834 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.295065 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="cefbd6b6-3aa3-459d-9f8d-060736f4de92" containerName="nova-scheduler-scheduler" containerID="cri-o://b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb" gracePeriod=30 Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.314740 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder3af6-account-delete-44b4q"] Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.322386 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "1132a0d0-bc9b-430d-a89e-33455c763b3c" (UID: "1132a0d0-bc9b-430d-a89e-33455c763b3c"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.322451 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-f4vpp"] Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.329715 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-f4vpp"] Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.339667 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-9zq5k"] Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.344425 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-9zq5k"] Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.382354 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ff29a0b3-1307-4fdb-bead-68d87f2f2923" (UID: "ff29a0b3-1307-4fdb-bead-68d87f2f2923"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.390119 5014 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.390175 5014 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.390202 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.390217 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.432652 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance6d2b-account-delete-n47pm"] Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.446826 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbicanb6a2-account-delete-c6b5p"] Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.468070 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell1b5d1-account-delete-2bzts"] Oct 06 21:53:05 crc kubenswrapper[5014]: W1006 21:53:05.487124 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd2bef7e3_d6fe_48f7_9ec5_a9c89f0e1349.slice/crio-8a9c1e9521e1fcde2011f64017b09796b05119281d58d0104ea3dc5bdf0fcf18 WatchSource:0}: Error finding container 8a9c1e9521e1fcde2011f64017b09796b05119281d58d0104ea3dc5bdf0fcf18: Status 404 returned error can't find the container with id 8a9c1e9521e1fcde2011f64017b09796b05119281d58d0104ea3dc5bdf0fcf18 Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.535045 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-dns-svc" (OuterVolumeSpecName: "dns-svc") pod 
"ff29a0b3-1307-4fdb-bead-68d87f2f2923" (UID: "ff29a0b3-1307-4fdb-bead-68d87f2f2923"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.537740 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="4b977fc8-6c11-41e6-9500-f0da2d66aea1" containerName="rabbitmq" containerID="cri-o://5d8ae003e8dc923a3ad6afd87e268712900c1d33e130493581a8045c5495deed" gracePeriod=604800 Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.554982 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d674559-08b5-41c9-8783-a5e42504fb3e" path="/var/lib/kubelet/pods/0d674559-08b5-41c9-8783-a5e42504fb3e/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.556317 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17a9c7b6-c877-4057-86ee-13d1ee4f9515" path="/var/lib/kubelet/pods/17a9c7b6-c877-4057-86ee-13d1ee4f9515/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.557000 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c" path="/var/lib/kubelet/pods/1fb7e064-fe75-4bc6-9a5d-3c1d92db4a3c/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.557766 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="347f9b08-ba1b-4065-be89-4a13a0f36b23" path="/var/lib/kubelet/pods/347f9b08-ba1b-4065-be89-4a13a0f36b23/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.559176 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34c1300b-0bf2-4bb3-af1b-7fb7f33182d2" path="/var/lib/kubelet/pods/34c1300b-0bf2-4bb3-af1b-7fb7f33182d2/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.564093 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56178220-15bb-4fed-9fcf-0f6e34ffeb3e" path="/var/lib/kubelet/pods/56178220-15bb-4fed-9fcf-0f6e34ffeb3e/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.565039 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5775db60-8257-4c71-ae41-2fd585c2a108" path="/var/lib/kubelet/pods/5775db60-8257-4c71-ae41-2fd585c2a108/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.565590 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-config" (OuterVolumeSpecName: "config") pod "ff29a0b3-1307-4fdb-bead-68d87f2f2923" (UID: "ff29a0b3-1307-4fdb-bead-68d87f2f2923"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.565932 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68ae1bd5-0dc8-4d02-b060-421ad434a8bd" path="/var/lib/kubelet/pods/68ae1bd5-0dc8-4d02-b060-421ad434a8bd/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.567474 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b04355a-0ce8-4929-8ba6-b8aca24d4daa" path="/var/lib/kubelet/pods/6b04355a-0ce8-4929-8ba6-b8aca24d4daa/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.568238 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74db136d-3445-4a7e-bcae-4645888ec806" path="/var/lib/kubelet/pods/74db136d-3445-4a7e-bcae-4645888ec806/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.575673 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="848d175d-ed70-4686-b0bf-5f07deb65fb1" path="/var/lib/kubelet/pods/848d175d-ed70-4686-b0bf-5f07deb65fb1/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.580216 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86104dc0-8799-4cbd-bead-038f3358dfba" path="/var/lib/kubelet/pods/86104dc0-8799-4cbd-bead-038f3358dfba/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.592517 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c" path="/var/lib/kubelet/pods/89b3ca3c-1317-4552-ac8d-d8bcc75ddf0c/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.601358 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d74142f-3a0a-4a47-99ce-be7ecac62f76" path="/var/lib/kubelet/pods/8d74142f-3a0a-4a47-99ce-be7ecac62f76/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.605166 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.605198 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff29a0b3-1307-4fdb-bead-68d87f2f2923-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.610488 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93196faa-4f5f-4f54-82de-89cad407be89" path="/var/lib/kubelet/pods/93196faa-4f5f-4f54-82de-89cad407be89/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.611585 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc12c0f4-59e6-40f3-a4ae-0fa426576beb" path="/var/lib/kubelet/pods/bc12c0f4-59e6-40f3-a4ae-0fa426576beb/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.612121 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5a46c2b-6455-4103-b35c-db8e3301d1e9" path="/var/lib/kubelet/pods/c5a46c2b-6455-4103-b35c-db8e3301d1e9/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.612599 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="caa95dd3-2ffa-4b18-b7e9-ad4075b25304" path="/var/lib/kubelet/pods/caa95dd3-2ffa-4b18-b7e9-ad4075b25304/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.624996 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cfdf797b-91a7-456f-a243-f08ba12aafbf" 
path="/var/lib/kubelet/pods/cfdf797b-91a7-456f-a243-f08ba12aafbf/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.626593 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db1ef19c-c5bd-4bf1-a58c-73ff198caaa9" path="/var/lib/kubelet/pods/db1ef19c-c5bd-4bf1-a58c-73ff198caaa9/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.627165 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6" path="/var/lib/kubelet/pods/fbdc5c13-ba4e-4ebb-bc58-e08d2eacc4d6/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.628737 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "c7d1001f-b56b-4d52-88bc-4f23831c3509" (UID: "c7d1001f-b56b-4d52-88bc-4f23831c3509"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.634958 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff881d46-e8a9-4eb9-a009-faffb59e898b" path="/var/lib/kubelet/pods/ff881d46-e8a9-4eb9-a009-faffb59e898b/volumes" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.635748 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "c7d1001f-b56b-4d52-88bc-4f23831c3509" (UID: "c7d1001f-b56b-4d52-88bc-4f23831c3509"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.657862 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "1132a0d0-bc9b-430d-a89e-33455c763b3c" (UID: "1132a0d0-bc9b-430d-a89e-33455c763b3c"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.668693 5014 generic.go:334] "Generic (PLEG): container finished" podID="0bd07ab2-973f-4531-8e5f-68d349e231b4" containerID="8f5fff8a69f244b622369e3d3222e772ceb03080fc79e98a2126892aa6220818" exitCode=143 Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.695695 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-598975567d-rtcs4" event={"ID":"0bd07ab2-973f-4531-8e5f-68d349e231b4","Type":"ContainerDied","Data":"8f5fff8a69f244b622369e3d3222e772ceb03080fc79e98a2126892aa6220818"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.696369 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0ce1b-account-delete-p9nfs"] Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.696472 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell1b5d1-account-delete-2bzts" event={"ID":"044b7af9-01a7-40c1-803c-30e568aaf1fe","Type":"ContainerStarted","Data":"8f8633dea0e82b35bb874aa415d7499f833675234055285f31c2effdddf56e5f"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.696491 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapi4b16-account-delete-jhqrl"] Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.696538 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement8e33-account-delete-g7pdl"] Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.706570 5014 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.706601 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7d1001f-b56b-4d52-88bc-4f23831c3509-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.706610 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1132a0d0-bc9b-430d-a89e-33455c763b3c-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.707292 5014 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.707343 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data podName:c8f59d7d-f71b-46b0-bd32-476a2517a3b6 nodeName:}" failed. No retries permitted until 2025-10-06 21:53:09.707326762 +0000 UTC m=+1335.000363496 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data") pod "rabbitmq-cell1-server-0" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6") : configmap "rabbitmq-cell1-config-data" not found Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.707763 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance6d2b-account-delete-n47pm" event={"ID":"c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c","Type":"ContainerStarted","Data":"f6f375e6a5f35c31f2cc6ba298a50d7c2005606af2f1ec61b80bf43d94a71e13"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.711922 5014 generic.go:334] "Generic (PLEG): container finished" podID="13fe806f-0e4e-4ea7-838b-938c5fe74c99" containerID="7c61f00177c33532524b8163faef3133d7c88a0c6edeaf8d14eb4a6022f7abdd" exitCode=143 Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.711973 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-697f765b44-77s6g" event={"ID":"13fe806f-0e4e-4ea7-838b-938c5fe74c99","Type":"ContainerDied","Data":"7c61f00177c33532524b8163faef3133d7c88a0c6edeaf8d14eb4a6022f7abdd"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.713506 5014 generic.go:334] "Generic (PLEG): container finished" podID="2d91cde4-29d5-4947-b2ee-73e29ac244c2" containerID="3e401ff685888d65e22cfe526043fdc38f33382719d48441e4570e416cd7a132" exitCode=0 Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.713610 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder3af6-account-delete-44b4q" event={"ID":"2d91cde4-29d5-4947-b2ee-73e29ac244c2","Type":"ContainerDied","Data":"3e401ff685888d65e22cfe526043fdc38f33382719d48441e4570e416cd7a132"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.713648 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder3af6-account-delete-44b4q" event={"ID":"2d91cde4-29d5-4947-b2ee-73e29ac244c2","Type":"ContainerStarted","Data":"c8bc3b2777e163d1cfbb3ad4b9b99a0b510d4b3fe38d78e7a3bdb2c5524a9f5a"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.724489 5014 generic.go:334] "Generic (PLEG): container finished" podID="7eeb278b-517f-4b26-825e-12d7d0d969ce" containerID="48a0f810064b04d6c7a9053d863e731b5f0654d50ec97630c69d7ccb6c16e34f" exitCode=0 Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.724590 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f6fff5c8f-xbgr9" event={"ID":"7eeb278b-517f-4b26-825e-12d7d0d969ce","Type":"ContainerDied","Data":"48a0f810064b04d6c7a9053d863e731b5f0654d50ec97630c69d7ccb6c16e34f"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.737931 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c7d1001f-b56b-4d52-88bc-4f23831c3509/ovsdbserver-nb/0.log" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.738021 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c7d1001f-b56b-4d52-88bc-4f23831c3509","Type":"ContainerDied","Data":"585b70c5b2c537198fef8bf78d157546bd8594061a8f3754f0065278ca9e18d6"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.738065 5014 scope.go:117] "RemoveContainer" containerID="19f11c3ccbb300a39fa2c1f9012ea3f2ed6574d0a166bcfbc5e489893f193296" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.738063 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.740910 5014 generic.go:334] "Generic (PLEG): container finished" podID="02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" containerID="8e43bbec207ad90407cab0459eeaa37da10b5228534b3bcc64a9d48cab00a4fb" exitCode=0 Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.740963 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf","Type":"ContainerDied","Data":"8e43bbec207ad90407cab0459eeaa37da10b5228534b3bcc64a9d48cab00a4fb"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.753739 5014 generic.go:334] "Generic (PLEG): container finished" podID="56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" containerID="cc9f5a4202ce6f9f16ab8d8453f50c25c26d08e6f673de106491b3f0b64a0984" exitCode=143 Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.753776 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-849cf44bc5-9qnb4" event={"ID":"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00","Type":"ContainerDied","Data":"cc9f5a4202ce6f9f16ab8d8453f50c25c26d08e6f673de106491b3f0b64a0984"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.777155 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi4b16-account-delete-jhqrl" event={"ID":"d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349","Type":"ContainerStarted","Data":"8a9c1e9521e1fcde2011f64017b09796b05119281d58d0104ea3dc5bdf0fcf18"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.813311 5014 generic.go:334] "Generic (PLEG): container finished" podID="3b18812d-9eec-4254-8633-b40f55244e47" containerID="1ca1df3e861d5336e4515ca03c187e532ee4429553cf2f9930ba7e1d9925c254" exitCode=0 Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.813522 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3b18812d-9eec-4254-8633-b40f55244e47","Type":"ContainerDied","Data":"1ca1df3e861d5336e4515ca03c187e532ee4429553cf2f9930ba7e1d9925c254"} Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.826263 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.826401 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.826489 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbicanb6a2-account-delete-c6b5p" event={"ID":"6ca96007-c66c-4e95-84b7-12eff893cca2","Type":"ContainerStarted","Data":"74251ca83d284fe7649fba35fd5fd4027dc8e3058303e83b341178b0c7e22ecb"} Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.829302 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.829361 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.829853 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.830047 5014 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-fwbdt" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovsdb-server" Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.830406 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.830524 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-fwbdt" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovs-vswitchd" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.830437 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0ce1b-account-delete-p9nfs" event={"ID":"17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43","Type":"ContainerStarted","Data":"dd97441d420ed9abcbc3cd0ee6fefab8dcd471113b74f34bc46d5c707e6eef6c"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.849457 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.850121 5014 generic.go:334] "Generic (PLEG): container finished" podID="05220712-c8ae-4ac8-9c49-d74770367b33" containerID="d474f69bb53df946b7ca116226cc152b798653cd694c8d2e13e91ca35d97a083" exitCode=0 Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.850186 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"d474f69bb53df946b7ca116226cc152b798653cd694c8d2e13e91ca35d97a083"} Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.850202 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.862874 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.868167 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" event={"ID":"ff29a0b3-1307-4fdb-bead-68d87f2f2923","Type":"ContainerDied","Data":"f81ffb74c2f7606ba2f55e7dcc2cd016020251f5c77642d89ac281c6cdb007f5"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.868314 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5967cc9597-h6t4m" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.880505 5014 scope.go:117] "RemoveContainer" containerID="9bdf32744d43a96f7be12f93ad6660d8bbb3d324702e5bee26e36c2cea2725ce" Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.880512 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 06 21:53:05 crc kubenswrapper[5014]: E1006 21:53:05.880571 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="6ebf215b-a88f-4b08-8f2e-58284b7d4548" containerName="nova-cell0-conductor-conductor" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.886097 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.889958 5014 generic.go:334] "Generic (PLEG): container finished" podID="49c2ecb8-63d7-4275-97ff-7aa899707212" containerID="a6b7e7b2fd6d1afbe67e44ec51642bedfaa579e1f6be92a11c811ecf98a44b29" exitCode=0 Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.890016 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"49c2ecb8-63d7-4275-97ff-7aa899707212","Type":"ContainerDied","Data":"a6b7e7b2fd6d1afbe67e44ec51642bedfaa579e1f6be92a11c811ecf98a44b29"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.896052 5014 generic.go:334] "Generic (PLEG): container finished" podID="78a24140-d3a5-463a-aaf9-49857f14decc" containerID="4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0" exitCode=0 Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.896069 5014 generic.go:334] "Generic (PLEG): container finished" podID="78a24140-d3a5-463a-aaf9-49857f14decc" containerID="599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb" exitCode=0 Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.896135 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.896492 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" event={"ID":"78a24140-d3a5-463a-aaf9-49857f14decc","Type":"ContainerDied","Data":"4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.896555 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" event={"ID":"78a24140-d3a5-463a-aaf9-49857f14decc","Type":"ContainerDied","Data":"599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.896567 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" event={"ID":"78a24140-d3a5-463a-aaf9-49857f14decc","Type":"ContainerDied","Data":"be32ce11a90718c0fd2883a20314f687ca16c197ca71b4d220ef215df90df5da"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.899315 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_1132a0d0-bc9b-430d-a89e-33455c763b3c/ovsdbserver-sb/0.log" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.899351 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1132a0d0-bc9b-430d-a89e-33455c763b3c","Type":"ContainerDied","Data":"688f79fff94f7065f9ed863518dfb80e9ef98e776cb4324a339f69c71e6bcb3a"} Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.899418 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Oct 06 21:53:05 crc kubenswrapper[5014]: I1006 21:53:05.990590 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.011580 5014 scope.go:117] "RemoveContainer" containerID="17be5865767feac1ef6592a1850f5a4352b1c7d266e2950f2d1dbfb81a9f252e" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.019224 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-internal-tls-certs\") pod \"78a24140-d3a5-463a-aaf9-49857f14decc\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.019286 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-config-data\") pod \"78a24140-d3a5-463a-aaf9-49857f14decc\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.019451 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-public-tls-certs\") pod \"78a24140-d3a5-463a-aaf9-49857f14decc\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.019511 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzchk\" (UniqueName: \"kubernetes.io/projected/78a24140-d3a5-463a-aaf9-49857f14decc-kube-api-access-gzchk\") pod \"78a24140-d3a5-463a-aaf9-49857f14decc\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.019545 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-combined-ca-bundle\") pod \"78a24140-d3a5-463a-aaf9-49857f14decc\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.019596 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78a24140-d3a5-463a-aaf9-49857f14decc-run-httpd\") pod \"78a24140-d3a5-463a-aaf9-49857f14decc\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.019650 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78a24140-d3a5-463a-aaf9-49857f14decc-log-httpd\") pod \"78a24140-d3a5-463a-aaf9-49857f14decc\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.029838 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78a24140-d3a5-463a-aaf9-49857f14decc-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "78a24140-d3a5-463a-aaf9-49857f14decc" (UID: "78a24140-d3a5-463a-aaf9-49857f14decc"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.035767 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78a24140-d3a5-463a-aaf9-49857f14decc-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "78a24140-d3a5-463a-aaf9-49857f14decc" (UID: "78a24140-d3a5-463a-aaf9-49857f14decc"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.052244 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85hpl\" (UniqueName: \"kubernetes.io/projected/3b18812d-9eec-4254-8633-b40f55244e47-kube-api-access-85hpl\") pod \"3b18812d-9eec-4254-8633-b40f55244e47\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.052320 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-galera-tls-certs\") pod \"3b18812d-9eec-4254-8633-b40f55244e47\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.052357 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"3b18812d-9eec-4254-8633-b40f55244e47\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.052400 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/78a24140-d3a5-463a-aaf9-49857f14decc-etc-swift\") pod \"78a24140-d3a5-463a-aaf9-49857f14decc\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.053377 5014 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78a24140-d3a5-463a-aaf9-49857f14decc-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.053404 5014 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78a24140-d3a5-463a-aaf9-49857f14decc-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: E1006 21:53:06.053647 5014 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Oct 06 21:53:06 crc kubenswrapper[5014]: E1006 21:53:06.053705 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data podName:4b977fc8-6c11-41e6-9500-f0da2d66aea1 nodeName:}" failed. No retries permitted until 2025-10-06 21:53:10.053688062 +0000 UTC m=+1335.346724796 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data") pod "rabbitmq-server-0" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1") : configmap "rabbitmq-config-data" not found Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.054305 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78a24140-d3a5-463a-aaf9-49857f14decc-kube-api-access-gzchk" (OuterVolumeSpecName: "kube-api-access-gzchk") pod "78a24140-d3a5-463a-aaf9-49857f14decc" (UID: "78a24140-d3a5-463a-aaf9-49857f14decc"). InnerVolumeSpecName "kube-api-access-gzchk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.069411 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5967cc9597-h6t4m"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.073763 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5967cc9597-h6t4m"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.089263 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.096504 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b18812d-9eec-4254-8633-b40f55244e47-kube-api-access-85hpl" (OuterVolumeSpecName: "kube-api-access-85hpl") pod "3b18812d-9eec-4254-8633-b40f55244e47" (UID: "3b18812d-9eec-4254-8633-b40f55244e47"). InnerVolumeSpecName "kube-api-access-85hpl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.103469 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.116379 5014 scope.go:117] "RemoveContainer" containerID="8a37ab91c905eddb6ad7d2e3f88dac93cf2150ba561314df29c56fe058ecc2e8" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.139023 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "mysql-db") pod "3b18812d-9eec-4254-8633-b40f55244e47" (UID: "3b18812d-9eec-4254-8633-b40f55244e47"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.139346 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78a24140-d3a5-463a-aaf9-49857f14decc-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "78a24140-d3a5-463a-aaf9-49857f14decc" (UID: "78a24140-d3a5-463a-aaf9-49857f14decc"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.158203 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-operator-scripts\") pod \"3b18812d-9eec-4254-8633-b40f55244e47\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.158322 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-combined-ca-bundle\") pod \"3b18812d-9eec-4254-8633-b40f55244e47\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.158346 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-kolla-config\") pod \"3b18812d-9eec-4254-8633-b40f55244e47\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.158410 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3b18812d-9eec-4254-8633-b40f55244e47-config-data-generated\") pod \"3b18812d-9eec-4254-8633-b40f55244e47\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.158451 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-secrets\") pod \"3b18812d-9eec-4254-8633-b40f55244e47\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.158662 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-config-data-default\") pod \"3b18812d-9eec-4254-8633-b40f55244e47\" (UID: \"3b18812d-9eec-4254-8633-b40f55244e47\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.159184 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85hpl\" (UniqueName: \"kubernetes.io/projected/3b18812d-9eec-4254-8633-b40f55244e47-kube-api-access-85hpl\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.159221 5014 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.159234 5014 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/78a24140-d3a5-463a-aaf9-49857f14decc-etc-swift\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.159246 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzchk\" (UniqueName: \"kubernetes.io/projected/78a24140-d3a5-463a-aaf9-49857f14decc-kube-api-access-gzchk\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.159574 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod 
"3b18812d-9eec-4254-8633-b40f55244e47" (UID: "3b18812d-9eec-4254-8633-b40f55244e47"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.159684 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "3b18812d-9eec-4254-8633-b40f55244e47" (UID: "3b18812d-9eec-4254-8633-b40f55244e47"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.159849 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "3b18812d-9eec-4254-8633-b40f55244e47" (UID: "3b18812d-9eec-4254-8633-b40f55244e47"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.166068 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b18812d-9eec-4254-8633-b40f55244e47-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "3b18812d-9eec-4254-8633-b40f55244e47" (UID: "3b18812d-9eec-4254-8633-b40f55244e47"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.185404 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-secrets" (OuterVolumeSpecName: "secrets") pod "3b18812d-9eec-4254-8633-b40f55244e47" (UID: "3b18812d-9eec-4254-8633-b40f55244e47"). InnerVolumeSpecName "secrets". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.262648 5014 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-operator-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.262670 5014 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-kolla-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.262680 5014 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3b18812d-9eec-4254-8633-b40f55244e47-config-data-generated\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.262688 5014 reconciler_common.go:293] "Volume detached for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-secrets\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.262700 5014 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3b18812d-9eec-4254-8633-b40f55244e47-config-data-default\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.376901 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.377167 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="ceilometer-central-agent" containerID="cri-o://1e488f802cc8da011f91e05b3c478b47334580c1a8c26408bcadc4e55ec4cd81" gracePeriod=30 Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.377636 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="proxy-httpd" containerID="cri-o://1c78227e99eced5448ed246e1bcbef1171c8d55eecc061f23758f282026f4fed" gracePeriod=30 Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.377729 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="sg-core" containerID="cri-o://ec20b9f41983e4b8a0ea0f9349995c36994eedc43877f52f349e3a2298e48326" gracePeriod=30 Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.377739 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="ceilometer-notification-agent" containerID="cri-o://d23dd50b826bbf964b1ff4d5d05a5097816f29157fbba7ced1c970a4eec16c3b" gracePeriod=30 Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.397507 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.399794 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="24c4cd4c-297a-45d8-ad6f-e24e53736ecc" containerName="kube-state-metrics" containerID="cri-o://9baa658cf1475d9f5420e5c02783c38281b57faef5be3f69ab55c29aa8529137" gracePeriod=30 Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.499345 5014 
operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.553370 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.553575 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="cd16c866-91b0-4261-a084-7a96ac597c04" containerName="memcached" containerID="cri-o://cdc30334797cf0d0a1abb5ab3b3e87dbac8996fc837919c9242d311431f403b2" gracePeriod=30 Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.585801 5014 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.613998 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-t5wff"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.672950 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-t5wff"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.729850 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-f6fff5c8f-xbgr9" podUID="7eeb278b-517f-4b26-825e-12d7d0d969ce" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.159:9696/\": dial tcp 10.217.0.159:9696: connect: connection refused" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.729919 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-27f2p"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.759506 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-27f2p"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.767088 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.767918 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-config-data" (OuterVolumeSpecName: "config-data") pod "78a24140-d3a5-463a-aaf9-49857f14decc" (UID: "78a24140-d3a5-463a-aaf9-49857f14decc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.774547 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-78bf4bbdb7-6fpl9"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.774741 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-78bf4bbdb7-6fpl9" podUID="fccac7f9-eeaa-4481-ab49-9e71dd8af79c" containerName="keystone-api" containerID="cri-o://366d9c7d12dc53287d03434a10b7448a9c6d02142fd4ec78034d67e5b49d8e4b" gracePeriod=30 Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.780058 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="4b977fc8-6c11-41e6-9500-f0da2d66aea1" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.104:5671: connect: connection refused" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.786893 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-pskhp"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.804875 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.808079 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-pskhp"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.821666 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-7147-account-create-q2lpp"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.827501 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-7147-account-create-q2lpp"] Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.854931 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "3b18812d-9eec-4254-8633-b40f55244e47" (UID: "3b18812d-9eec-4254-8633-b40f55244e47"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.856548 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "78a24140-d3a5-463a-aaf9-49857f14decc" (UID: "78a24140-d3a5-463a-aaf9-49857f14decc"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.872266 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3b18812d-9eec-4254-8633-b40f55244e47" (UID: "3b18812d-9eec-4254-8633-b40f55244e47"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.876245 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "78a24140-d3a5-463a-aaf9-49857f14decc" (UID: "78a24140-d3a5-463a-aaf9-49857f14decc"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.905495 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "78a24140-d3a5-463a-aaf9-49857f14decc" (UID: "78a24140-d3a5-463a-aaf9-49857f14decc"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.905597 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-internal-tls-certs\") pod \"78a24140-d3a5-463a-aaf9-49857f14decc\" (UID: \"78a24140-d3a5-463a-aaf9-49857f14decc\") " Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.905975 5014 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.906886 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.906905 5014 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.906922 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b18812d-9eec-4254-8633-b40f55244e47-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:06 crc kubenswrapper[5014]: W1006 21:53:06.906936 5014 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/78a24140-d3a5-463a-aaf9-49857f14decc/volumes/kubernetes.io~secret/internal-tls-certs Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.906961 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "78a24140-d3a5-463a-aaf9-49857f14decc" (UID: "78a24140-d3a5-463a-aaf9-49857f14decc"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.926465 5014 generic.go:334] "Generic (PLEG): container finished" podID="d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349" containerID="8535cd122ecdef14d55810af72e50b213cf4fd419bc7559f9484489bc8b3b735" exitCode=0 Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.926553 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi4b16-account-delete-jhqrl" event={"ID":"d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349","Type":"ContainerDied","Data":"8535cd122ecdef14d55810af72e50b213cf4fd419bc7559f9484489bc8b3b735"} Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.958263 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="06e8dc30-8c95-4585-82e0-fc82de286a1c" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.165:8776/healthcheck\": read tcp 10.217.0.2:36504->10.217.0.165:8776: read: connection reset by peer" Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.965583 5014 generic.go:334] "Generic (PLEG): container finished" podID="6c301c8b-acb9-4008-9832-ce83dc524b6d" containerID="496962eeeb2ad1c6b5517af39859967ca3ba2462e7203bf88e13bcb22e74bca9" exitCode=0 Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.965723 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement8e33-account-delete-g7pdl" event={"ID":"6c301c8b-acb9-4008-9832-ce83dc524b6d","Type":"ContainerDied","Data":"496962eeeb2ad1c6b5517af39859967ca3ba2462e7203bf88e13bcb22e74bca9"} Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.965758 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement8e33-account-delete-g7pdl" event={"ID":"6c301c8b-acb9-4008-9832-ce83dc524b6d","Type":"ContainerStarted","Data":"6199fa82173b71577d05cba3c5bbd9b898999f5e05aa91f75d56593cbef0f0a8"} Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.975119 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" containerName="galera" containerID="cri-o://40dcdb5d7c50cb4d2ec51f4ef5316c1e1fba2901ad8cf7db90116b6c873b152a" gracePeriod=30 Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.985424 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3b18812d-9eec-4254-8633-b40f55244e47","Type":"ContainerDied","Data":"08394ba6b24bcdee49c1708e48bd4765cfbaf33846d9caad10f595fabf98baf2"} Oct 06 21:53:06 crc kubenswrapper[5014]: I1006 21:53:06.985790 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:06.999463 5014 generic.go:334] "Generic (PLEG): container finished" podID="c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c" containerID="ee04d15145622330280112d33fa03759b3233a0f8fba1afae6789c4d4d814cb6" exitCode=0 Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:06.999552 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance6d2b-account-delete-n47pm" event={"ID":"c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c","Type":"ContainerDied","Data":"ee04d15145622330280112d33fa03759b3233a0f8fba1afae6789c4d4d814cb6"} Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.008053 5014 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/78a24140-d3a5-463a-aaf9-49857f14decc-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.010742 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder3af6-account-delete-44b4q" event={"ID":"2d91cde4-29d5-4947-b2ee-73e29ac244c2","Type":"ContainerDied","Data":"c8bc3b2777e163d1cfbb3ad4b9b99a0b510d4b3fe38d78e7a3bdb2c5524a9f5a"} Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.010776 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8bc3b2777e163d1cfbb3ad4b9b99a0b510d4b3fe38d78e7a3bdb2c5524a9f5a" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.043793 5014 generic.go:334] "Generic (PLEG): container finished" podID="044b7af9-01a7-40c1-803c-30e568aaf1fe" containerID="d836f8e8bcb39601fc27693a322159da55ec8df8c24bea7d87e72544f5c6ca25" exitCode=1 Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.043945 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell1b5d1-account-delete-2bzts" event={"ID":"044b7af9-01a7-40c1-803c-30e568aaf1fe","Type":"ContainerDied","Data":"d836f8e8bcb39601fc27693a322159da55ec8df8c24bea7d87e72544f5c6ca25"} Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.052496 5014 generic.go:334] "Generic (PLEG): container finished" podID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerID="1c78227e99eced5448ed246e1bcbef1171c8d55eecc061f23758f282026f4fed" exitCode=0 Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.052530 5014 generic.go:334] "Generic (PLEG): container finished" podID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerID="ec20b9f41983e4b8a0ea0f9349995c36994eedc43877f52f349e3a2298e48326" exitCode=2 Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.052583 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"32e89ff3-0d60-4fd1-9c0d-831b92311165","Type":"ContainerDied","Data":"1c78227e99eced5448ed246e1bcbef1171c8d55eecc061f23758f282026f4fed"} Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.052611 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"32e89ff3-0d60-4fd1-9c0d-831b92311165","Type":"ContainerDied","Data":"ec20b9f41983e4b8a0ea0f9349995c36994eedc43877f52f349e3a2298e48326"} Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.058800 5014 generic.go:334] "Generic (PLEG): container finished" podID="24c4cd4c-297a-45d8-ad6f-e24e53736ecc" containerID="9baa658cf1475d9f5420e5c02783c38281b57faef5be3f69ab55c29aa8529137" exitCode=2 Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.058882 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" 
event={"ID":"24c4cd4c-297a-45d8-ad6f-e24e53736ecc","Type":"ContainerDied","Data":"9baa658cf1475d9f5420e5c02783c38281b57faef5be3f69ab55c29aa8529137"} Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.060486 5014 generic.go:334] "Generic (PLEG): container finished" podID="6ca96007-c66c-4e95-84b7-12eff893cca2" containerID="c486ab912c7417dff548ef9db60b7c8a203254071119a11ef86f79a80452130d" exitCode=0 Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.060547 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbicanb6a2-account-delete-c6b5p" event={"ID":"6ca96007-c66c-4e95-84b7-12eff893cca2","Type":"ContainerDied","Data":"c486ab912c7417dff548ef9db60b7c8a203254071119a11ef86f79a80452130d"} Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.070166 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"49c2ecb8-63d7-4275-97ff-7aa899707212","Type":"ContainerDied","Data":"33fac91b5175fea94fcd5a738d6784dfa9f469df0acc66711d0dca96a16b2645"} Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.070205 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="33fac91b5175fea94fcd5a738d6784dfa9f469df0acc66711d0dca96a16b2645" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.074341 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0ce1b-account-delete-p9nfs" event={"ID":"17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43","Type":"ContainerStarted","Data":"55b66cf16d95823dc887b2d9d3ca9cadbc7015e7375de93759240008499fa1e9"} Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.074468 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/novacell0ce1b-account-delete-p9nfs" podUID="17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43" containerName="mariadb-account-delete" containerID="cri-o://55b66cf16d95823dc887b2d9d3ca9cadbc7015e7375de93759240008499fa1e9" gracePeriod=30 Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.095272 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novacell0ce1b-account-delete-p9nfs" podStartSLOduration=5.095255171 podStartE2EDuration="5.095255171s" podCreationTimestamp="2025-10-06 21:53:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 21:53:07.089497256 +0000 UTC m=+1332.382533980" watchObservedRunningTime="2025-10-06 21:53:07.095255171 +0000 UTC m=+1332.388291905" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.099794 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="c8f59d7d-f71b-46b0-bd32-476a2517a3b6" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.483864 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="1dfeacf8-a072-4b44-bed9-618acd31fb6f" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": read tcp 10.217.0.2:39992->10.217.0.205:8775: read: connection reset by peer" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.484198 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="1dfeacf8-a072-4b44-bed9-618acd31fb6f" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": read tcp 
10.217.0.2:39978->10.217.0.205:8775: read: connection reset by peer" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.500334 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1132a0d0-bc9b-430d-a89e-33455c763b3c" path="/var/lib/kubelet/pods/1132a0d0-bc9b-430d-a89e-33455c763b3c/volumes" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.500940 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8069cfef-c703-4aa4-b1a4-6860fc1734db" path="/var/lib/kubelet/pods/8069cfef-c703-4aa4-b1a4-6860fc1734db/volumes" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.501405 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="862503fc-2bba-487c-aed2-83403621b99b" path="/var/lib/kubelet/pods/862503fc-2bba-487c-aed2-83403621b99b/volumes" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.502529 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ada193c9-b872-4490-bb95-a27e9f542aec" path="/var/lib/kubelet/pods/ada193c9-b872-4490-bb95-a27e9f542aec/volumes" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.503036 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb" path="/var/lib/kubelet/pods/c5cb1cdd-bc1c-4682-8ed4-6c2aee7bc8bb/volumes" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.503748 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff29a0b3-1307-4fdb-bead-68d87f2f2923" path="/var/lib/kubelet/pods/ff29a0b3-1307-4fdb-bead-68d87f2f2923/volumes" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.926265 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder3af6-account-delete-44b4q" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.927318 5014 scope.go:117] "RemoveContainer" containerID="085db7e17be44d9a79cea2381416abb7231f57828c4dbcfdbdfc446dac2fb6f0" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.945750 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.950253 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.950994 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.962211 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.964979 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell1b5d1-account-delete-2bzts" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.968854 5014 scope.go:117] "RemoveContainer" containerID="4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.969930 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novaapi4b16-account-delete-jhqrl" Oct 06 21:53:07 crc kubenswrapper[5014]: I1006 21:53:07.996828 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance6d2b-account-delete-n47pm" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.001254 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement8e33-account-delete-g7pdl" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.009540 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbicanb6a2-account-delete-c6b5p" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.029118 5014 scope.go:117] "RemoveContainer" containerID="599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.029473 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-vencrypt-tls-certs\") pod \"49c2ecb8-63d7-4275-97ff-7aa899707212\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.029546 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nbkx\" (UniqueName: \"kubernetes.io/projected/2d91cde4-29d5-4947-b2ee-73e29ac244c2-kube-api-access-5nbkx\") pod \"2d91cde4-29d5-4947-b2ee-73e29ac244c2\" (UID: \"2d91cde4-29d5-4947-b2ee-73e29ac244c2\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.029589 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75cpj\" (UniqueName: \"kubernetes.io/projected/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-api-access-75cpj\") pod \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.029680 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-combined-ca-bundle\") pod \"49c2ecb8-63d7-4275-97ff-7aa899707212\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.029722 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-combined-ca-bundle\") pod \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.029760 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-nova-novncproxy-tls-certs\") pod \"49c2ecb8-63d7-4275-97ff-7aa899707212\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.029811 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sql4v\" (UniqueName: \"kubernetes.io/projected/6c301c8b-acb9-4008-9832-ce83dc524b6d-kube-api-access-sql4v\") pod \"6c301c8b-acb9-4008-9832-ce83dc524b6d\" (UID: \"6c301c8b-acb9-4008-9832-ce83dc524b6d\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.029848 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phjfc\" (UniqueName: \"kubernetes.io/projected/044b7af9-01a7-40c1-803c-30e568aaf1fe-kube-api-access-phjfc\") pod \"044b7af9-01a7-40c1-803c-30e568aaf1fe\" (UID: \"044b7af9-01a7-40c1-803c-30e568aaf1fe\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.029900 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" 
(UniqueName: \"kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-state-metrics-tls-config\") pod \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.029973 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5ld4\" (UniqueName: \"kubernetes.io/projected/c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c-kube-api-access-g5ld4\") pod \"c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c\" (UID: \"c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.030037 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-state-metrics-tls-certs\") pod \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\" (UID: \"24c4cd4c-297a-45d8-ad6f-e24e53736ecc\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.030161 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sll76\" (UniqueName: \"kubernetes.io/projected/6ca96007-c66c-4e95-84b7-12eff893cca2-kube-api-access-sll76\") pod \"6ca96007-c66c-4e95-84b7-12eff893cca2\" (UID: \"6ca96007-c66c-4e95-84b7-12eff893cca2\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.030198 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxgjk\" (UniqueName: \"kubernetes.io/projected/d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349-kube-api-access-nxgjk\") pod \"d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349\" (UID: \"d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.030259 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-config-data\") pod \"49c2ecb8-63d7-4275-97ff-7aa899707212\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.030293 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zvv8\" (UniqueName: \"kubernetes.io/projected/49c2ecb8-63d7-4275-97ff-7aa899707212-kube-api-access-7zvv8\") pod \"49c2ecb8-63d7-4275-97ff-7aa899707212\" (UID: \"49c2ecb8-63d7-4275-97ff-7aa899707212\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.044013 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c2ecb8-63d7-4275-97ff-7aa899707212-kube-api-access-7zvv8" (OuterVolumeSpecName: "kube-api-access-7zvv8") pod "49c2ecb8-63d7-4275-97ff-7aa899707212" (UID: "49c2ecb8-63d7-4275-97ff-7aa899707212"). InnerVolumeSpecName "kube-api-access-7zvv8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.044103 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-api-access-75cpj" (OuterVolumeSpecName: "kube-api-access-75cpj") pod "24c4cd4c-297a-45d8-ad6f-e24e53736ecc" (UID: "24c4cd4c-297a-45d8-ad6f-e24e53736ecc"). InnerVolumeSpecName "kube-api-access-75cpj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.044870 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/044b7af9-01a7-40c1-803c-30e568aaf1fe-kube-api-access-phjfc" (OuterVolumeSpecName: "kube-api-access-phjfc") pod "044b7af9-01a7-40c1-803c-30e568aaf1fe" (UID: "044b7af9-01a7-40c1-803c-30e568aaf1fe"). InnerVolumeSpecName "kube-api-access-phjfc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.050550 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c301c8b-acb9-4008-9832-ce83dc524b6d-kube-api-access-sql4v" (OuterVolumeSpecName: "kube-api-access-sql4v") pod "6c301c8b-acb9-4008-9832-ce83dc524b6d" (UID: "6c301c8b-acb9-4008-9832-ce83dc524b6d"). InnerVolumeSpecName "kube-api-access-sql4v". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.055582 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d91cde4-29d5-4947-b2ee-73e29ac244c2-kube-api-access-5nbkx" (OuterVolumeSpecName: "kube-api-access-5nbkx") pod "2d91cde4-29d5-4947-b2ee-73e29ac244c2" (UID: "2d91cde4-29d5-4947-b2ee-73e29ac244c2"). InnerVolumeSpecName "kube-api-access-5nbkx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.055721 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c-kube-api-access-g5ld4" (OuterVolumeSpecName: "kube-api-access-g5ld4") pod "c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c" (UID: "c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c"). InnerVolumeSpecName "kube-api-access-g5ld4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.086033 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.089007 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349-kube-api-access-nxgjk" (OuterVolumeSpecName: "kube-api-access-nxgjk") pod "d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349" (UID: "d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349"). InnerVolumeSpecName "kube-api-access-nxgjk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.124007 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.135681 5014 scope.go:117] "RemoveContainer" containerID="4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.136350 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-internal-tls-certs\") pod \"06e8dc30-8c95-4585-82e0-fc82de286a1c\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.136377 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/06e8dc30-8c95-4585-82e0-fc82de286a1c-etc-machine-id\") pod \"06e8dc30-8c95-4585-82e0-fc82de286a1c\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.136401 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-combined-ca-bundle\") pod \"06e8dc30-8c95-4585-82e0-fc82de286a1c\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.136473 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-config-data\") pod \"06e8dc30-8c95-4585-82e0-fc82de286a1c\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.136596 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wsf6r\" (UniqueName: \"kubernetes.io/projected/06e8dc30-8c95-4585-82e0-fc82de286a1c-kube-api-access-wsf6r\") pod \"06e8dc30-8c95-4585-82e0-fc82de286a1c\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.136652 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/06e8dc30-8c95-4585-82e0-fc82de286a1c-logs\") pod \"06e8dc30-8c95-4585-82e0-fc82de286a1c\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.136702 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-config-data-custom\") pod \"06e8dc30-8c95-4585-82e0-fc82de286a1c\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.136778 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-scripts\") pod \"06e8dc30-8c95-4585-82e0-fc82de286a1c\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.136802 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-public-tls-certs\") pod \"06e8dc30-8c95-4585-82e0-fc82de286a1c\" (UID: \"06e8dc30-8c95-4585-82e0-fc82de286a1c\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.136886 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/host-path/06e8dc30-8c95-4585-82e0-fc82de286a1c-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "06e8dc30-8c95-4585-82e0-fc82de286a1c" (UID: "06e8dc30-8c95-4585-82e0-fc82de286a1c"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.137260 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zvv8\" (UniqueName: \"kubernetes.io/projected/49c2ecb8-63d7-4275-97ff-7aa899707212-kube-api-access-7zvv8\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.137278 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nbkx\" (UniqueName: \"kubernetes.io/projected/2d91cde4-29d5-4947-b2ee-73e29ac244c2-kube-api-access-5nbkx\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.137294 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75cpj\" (UniqueName: \"kubernetes.io/projected/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-api-access-75cpj\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.137308 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sql4v\" (UniqueName: \"kubernetes.io/projected/6c301c8b-acb9-4008-9832-ce83dc524b6d-kube-api-access-sql4v\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.137321 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phjfc\" (UniqueName: \"kubernetes.io/projected/044b7af9-01a7-40c1-803c-30e568aaf1fe-kube-api-access-phjfc\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.137332 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5ld4\" (UniqueName: \"kubernetes.io/projected/c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c-kube-api-access-g5ld4\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.137344 5014 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/06e8dc30-8c95-4585-82e0-fc82de286a1c-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.137355 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxgjk\" (UniqueName: \"kubernetes.io/projected/d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349-kube-api-access-nxgjk\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.137867 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06e8dc30-8c95-4585-82e0-fc82de286a1c-logs" (OuterVolumeSpecName: "logs") pod "06e8dc30-8c95-4585-82e0-fc82de286a1c" (UID: "06e8dc30-8c95-4585-82e0-fc82de286a1c"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: E1006 21:53:08.140754 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0\": container with ID starting with 4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0 not found: ID does not exist" containerID="4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.140836 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0"} err="failed to get container status \"4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0\": rpc error: code = NotFound desc = could not find container \"4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0\": container with ID starting with 4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0 not found: ID does not exist" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.140898 5014 scope.go:117] "RemoveContainer" containerID="599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.142849 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ca96007-c66c-4e95-84b7-12eff893cca2-kube-api-access-sll76" (OuterVolumeSpecName: "kube-api-access-sll76") pod "6ca96007-c66c-4e95-84b7-12eff893cca2" (UID: "6ca96007-c66c-4e95-84b7-12eff893cca2"). InnerVolumeSpecName "kube-api-access-sll76". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: E1006 21:53:08.157773 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb\": container with ID starting with 599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb not found: ID does not exist" containerID="599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.157991 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb"} err="failed to get container status \"599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb\": rpc error: code = NotFound desc = could not find container \"599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb\": container with ID starting with 599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb not found: ID does not exist" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.158342 5014 scope.go:117] "RemoveContainer" containerID="4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.163410 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0"} err="failed to get container status \"4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0\": rpc error: code = NotFound desc = could not find container \"4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0\": container with ID starting with 
4f1f896f3133120abb2ff577ccc541196c018018d54ba173132c4dfa0bb7a1d0 not found: ID does not exist" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.163465 5014 scope.go:117] "RemoveContainer" containerID="599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.164132 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb"} err="failed to get container status \"599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb\": rpc error: code = NotFound desc = could not find container \"599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb\": container with ID starting with 599d35fc1026b8ec561dbc7f8a5e726ab756471e75637e8260aa748f900d0cbb not found: ID does not exist" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.164191 5014 scope.go:117] "RemoveContainer" containerID="c8aaf100576649e3e25587bb86a9bf5da8f2697e6e40a96847857214fba91a73" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.167507 5014 generic.go:334] "Generic (PLEG): container finished" podID="4a3bf50b-cb91-4201-affd-0c42d3585df2" containerID="f799cb043ff7d806ae4f877f34b2084e03598231d0c40a3275884b945a348c9e" exitCode=0 Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.167611 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4a3bf50b-cb91-4201-affd-0c42d3585df2","Type":"ContainerDied","Data":"f799cb043ff7d806ae4f877f34b2084e03598231d0c40a3275884b945a348c9e"} Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.168744 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-scripts" (OuterVolumeSpecName: "scripts") pod "06e8dc30-8c95-4585-82e0-fc82de286a1c" (UID: "06e8dc30-8c95-4585-82e0-fc82de286a1c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.184253 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "06e8dc30-8c95-4585-82e0-fc82de286a1c" (UID: "06e8dc30-8c95-4585-82e0-fc82de286a1c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.190435 5014 generic.go:334] "Generic (PLEG): container finished" podID="1dfeacf8-a072-4b44-bed9-618acd31fb6f" containerID="6d9c705385f5fdafb7775a2b08e388c98d6323f0cc8e8398d03748b74369b48a" exitCode=0 Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.190508 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1dfeacf8-a072-4b44-bed9-618acd31fb6f","Type":"ContainerDied","Data":"6d9c705385f5fdafb7775a2b08e388c98d6323f0cc8e8398d03748b74369b48a"} Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.196023 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapi4b16-account-delete-jhqrl" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.196065 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi4b16-account-delete-jhqrl" event={"ID":"d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349","Type":"ContainerDied","Data":"8a9c1e9521e1fcde2011f64017b09796b05119281d58d0104ea3dc5bdf0fcf18"} Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.199453 5014 generic.go:334] "Generic (PLEG): container finished" podID="3c1e8f70-227b-40e0-aceb-470eed382180" containerID="b619016e22f6883fb4aa46369b1d687e057d2e2ffd28c9ca34b55365fb9839c5" exitCode=0 Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.199524 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3c1e8f70-227b-40e0-aceb-470eed382180","Type":"ContainerDied","Data":"b619016e22f6883fb4aa46369b1d687e057d2e2ffd28c9ca34b55365fb9839c5"} Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.203232 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06e8dc30-8c95-4585-82e0-fc82de286a1c-kube-api-access-wsf6r" (OuterVolumeSpecName: "kube-api-access-wsf6r") pod "06e8dc30-8c95-4585-82e0-fc82de286a1c" (UID: "06e8dc30-8c95-4585-82e0-fc82de286a1c"). InnerVolumeSpecName "kube-api-access-wsf6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.203677 5014 generic.go:334] "Generic (PLEG): container finished" podID="daa5c33b-d941-4030-bf9f-cd6ed831986e" containerID="1b5ca8147b22af5bd68e4073c702853e918a3da61cc14255747ea6cd2310a427" exitCode=0 Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.203794 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"daa5c33b-d941-4030-bf9f-cd6ed831986e","Type":"ContainerDied","Data":"1b5ca8147b22af5bd68e4073c702853e918a3da61cc14255747ea6cd2310a427"} Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.209408 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance6d2b-account-delete-n47pm" event={"ID":"c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c","Type":"ContainerDied","Data":"f6f375e6a5f35c31f2cc6ba298a50d7c2005606af2f1ec61b80bf43d94a71e13"} Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.209498 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance6d2b-account-delete-n47pm" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.220973 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"24c4cd4c-297a-45d8-ad6f-e24e53736ecc","Type":"ContainerDied","Data":"fa176ff3396b5dac4958fb54a4067be44d7861bf720692cc71185f34d2a1f4ac"} Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.221069 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.228587 5014 generic.go:334] "Generic (PLEG): container finished" podID="411ad591-8dad-46ef-8a44-88e86f5c86dd" containerID="e8673788407b2a922e671f5a29648d8cec35c573c85be82d267e82064c4a8203" exitCode=0 Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.228658 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f4ddcc578-kbrhw" event={"ID":"411ad591-8dad-46ef-8a44-88e86f5c86dd","Type":"ContainerDied","Data":"e8673788407b2a922e671f5a29648d8cec35c573c85be82d267e82064c4a8203"} Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.229256 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f4ddcc578-kbrhw" event={"ID":"411ad591-8dad-46ef-8a44-88e86f5c86dd","Type":"ContainerDied","Data":"1eb9237c7e9f252e8de293577b7af7ea2510be02c4bea5814397c0d261378073"} Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.229318 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f4ddcc578-kbrhw" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.234315 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbicanb6a2-account-delete-c6b5p" event={"ID":"6ca96007-c66c-4e95-84b7-12eff893cca2","Type":"ContainerDied","Data":"74251ca83d284fe7649fba35fd5fd4027dc8e3058303e83b341178b0c7e22ecb"} Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.234366 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbicanb6a2-account-delete-c6b5p" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.238785 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-config-data\") pod \"411ad591-8dad-46ef-8a44-88e86f5c86dd\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.238832 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smdpw\" (UniqueName: \"kubernetes.io/projected/411ad591-8dad-46ef-8a44-88e86f5c86dd-kube-api-access-smdpw\") pod \"411ad591-8dad-46ef-8a44-88e86f5c86dd\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.239021 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-public-tls-certs\") pod \"411ad591-8dad-46ef-8a44-88e86f5c86dd\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.239048 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/411ad591-8dad-46ef-8a44-88e86f5c86dd-logs\") pod \"411ad591-8dad-46ef-8a44-88e86f5c86dd\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.239139 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-internal-tls-certs\") pod \"411ad591-8dad-46ef-8a44-88e86f5c86dd\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.239195 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-combined-ca-bundle\") pod \"411ad591-8dad-46ef-8a44-88e86f5c86dd\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.239226 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-scripts\") pod \"411ad591-8dad-46ef-8a44-88e86f5c86dd\" (UID: \"411ad591-8dad-46ef-8a44-88e86f5c86dd\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.239557 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wsf6r\" (UniqueName: \"kubernetes.io/projected/06e8dc30-8c95-4585-82e0-fc82de286a1c-kube-api-access-wsf6r\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.239571 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/06e8dc30-8c95-4585-82e0-fc82de286a1c-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.239581 5014 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.239589 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.239598 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sll76\" (UniqueName: \"kubernetes.io/projected/6ca96007-c66c-4e95-84b7-12eff893cca2-kube-api-access-sll76\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.247839 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/411ad591-8dad-46ef-8a44-88e86f5c86dd-logs" (OuterVolumeSpecName: "logs") pod "411ad591-8dad-46ef-8a44-88e86f5c86dd" (UID: "411ad591-8dad-46ef-8a44-88e86f5c86dd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.253838 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/411ad591-8dad-46ef-8a44-88e86f5c86dd-kube-api-access-smdpw" (OuterVolumeSpecName: "kube-api-access-smdpw") pod "411ad591-8dad-46ef-8a44-88e86f5c86dd" (UID: "411ad591-8dad-46ef-8a44-88e86f5c86dd"). InnerVolumeSpecName "kube-api-access-smdpw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.269087 5014 generic.go:334] "Generic (PLEG): container finished" podID="06e8dc30-8c95-4585-82e0-fc82de286a1c" containerID="a4c0aa6211dfec0a5eda7b347326298f80376112ca717764da53c00f048df69b" exitCode=0 Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.269185 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"06e8dc30-8c95-4585-82e0-fc82de286a1c","Type":"ContainerDied","Data":"a4c0aa6211dfec0a5eda7b347326298f80376112ca717764da53c00f048df69b"} Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.269220 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"06e8dc30-8c95-4585-82e0-fc82de286a1c","Type":"ContainerDied","Data":"4a593f9dd7435ec0b93f8a4db7e1f1773704e5187c941dab328f3ec8a3edbcc2"} Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.269298 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.276074 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement8e33-account-delete-g7pdl" event={"ID":"6c301c8b-acb9-4008-9832-ce83dc524b6d","Type":"ContainerDied","Data":"6199fa82173b71577d05cba3c5bbd9b898999f5e05aa91f75d56593cbef0f0a8"} Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.276169 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement8e33-account-delete-g7pdl" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.282041 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-scripts" (OuterVolumeSpecName: "scripts") pod "411ad591-8dad-46ef-8a44-88e86f5c86dd" (UID: "411ad591-8dad-46ef-8a44-88e86f5c86dd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.282770 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell1b5d1-account-delete-2bzts" event={"ID":"044b7af9-01a7-40c1-803c-30e568aaf1fe","Type":"ContainerDied","Data":"8f8633dea0e82b35bb874aa415d7499f833675234055285f31c2effdddf56e5f"} Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.282863 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell1b5d1-account-delete-2bzts" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.286687 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "49c2ecb8-63d7-4275-97ff-7aa899707212" (UID: "49c2ecb8-63d7-4275-97ff-7aa899707212"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.293434 5014 generic.go:334] "Generic (PLEG): container finished" podID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerID="1e488f802cc8da011f91e05b3c478b47334580c1a8c26408bcadc4e55ec4cd81" exitCode=0 Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.293527 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder3af6-account-delete-44b4q" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.293836 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"32e89ff3-0d60-4fd1-9c0d-831b92311165","Type":"ContainerDied","Data":"1e488f802cc8da011f91e05b3c478b47334580c1a8c26408bcadc4e55ec4cd81"} Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.294018 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 06 21:53:08 crc kubenswrapper[5014]: E1006 21:53:08.315772 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="40dcdb5d7c50cb4d2ec51f4ef5316c1e1fba2901ad8cf7db90116b6c873b152a" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Oct 06 21:53:08 crc kubenswrapper[5014]: E1006 21:53:08.324915 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="40dcdb5d7c50cb4d2ec51f4ef5316c1e1fba2901ad8cf7db90116b6c873b152a" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Oct 06 21:53:08 crc kubenswrapper[5014]: E1006 21:53:08.328822 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="40dcdb5d7c50cb4d2ec51f4ef5316c1e1fba2901ad8cf7db90116b6c873b152a" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Oct 06 21:53:08 crc kubenswrapper[5014]: E1006 21:53:08.328883 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" containerName="galera" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.341072 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/411ad591-8dad-46ef-8a44-88e86f5c86dd-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.341114 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.341127 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.341143 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smdpw\" (UniqueName: \"kubernetes.io/projected/411ad591-8dad-46ef-8a44-88e86f5c86dd-kube-api-access-smdpw\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.357699 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "24c4cd4c-297a-45d8-ad6f-e24e53736ecc" (UID: 
"24c4cd4c-297a-45d8-ad6f-e24e53736ecc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.362778 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "24c4cd4c-297a-45d8-ad6f-e24e53736ecc" (UID: "24c4cd4c-297a-45d8-ad6f-e24e53736ecc"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.391609 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "49c2ecb8-63d7-4275-97ff-7aa899707212" (UID: "49c2ecb8-63d7-4275-97ff-7aa899707212"). InnerVolumeSpecName "nova-novncproxy-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.399395 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "06e8dc30-8c95-4585-82e0-fc82de286a1c" (UID: "06e8dc30-8c95-4585-82e0-fc82de286a1c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.418148 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "24c4cd4c-297a-45d8-ad6f-e24e53736ecc" (UID: "24c4cd4c-297a-45d8-ad6f-e24e53736ecc"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.427736 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "49c2ecb8-63d7-4275-97ff-7aa899707212" (UID: "49c2ecb8-63d7-4275-97ff-7aa899707212"). InnerVolumeSpecName "vencrypt-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.442734 5014 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.442770 5014 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.442788 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.442800 5014 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.442813 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24c4cd4c-297a-45d8-ad6f-e24e53736ecc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.442824 5014 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.449335 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-config-data" (OuterVolumeSpecName: "config-data") pod "06e8dc30-8c95-4585-82e0-fc82de286a1c" (UID: "06e8dc30-8c95-4585-82e0-fc82de286a1c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.509469 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "06e8dc30-8c95-4585-82e0-fc82de286a1c" (UID: "06e8dc30-8c95-4585-82e0-fc82de286a1c"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.511648 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "06e8dc30-8c95-4585-82e0-fc82de286a1c" (UID: "06e8dc30-8c95-4585-82e0-fc82de286a1c"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.517094 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "411ad591-8dad-46ef-8a44-88e86f5c86dd" (UID: "411ad591-8dad-46ef-8a44-88e86f5c86dd"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.518585 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-config-data" (OuterVolumeSpecName: "config-data") pod "49c2ecb8-63d7-4275-97ff-7aa899707212" (UID: "49c2ecb8-63d7-4275-97ff-7aa899707212"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.519408 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-config-data" (OuterVolumeSpecName: "config-data") pod "411ad591-8dad-46ef-8a44-88e86f5c86dd" (UID: "411ad591-8dad-46ef-8a44-88e86f5c86dd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.546209 5014 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.546441 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.546513 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c2ecb8-63d7-4275-97ff-7aa899707212-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.546578 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.546692 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.546764 5014 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/06e8dc30-8c95-4585-82e0-fc82de286a1c-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.608085 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "411ad591-8dad-46ef-8a44-88e86f5c86dd" (UID: "411ad591-8dad-46ef-8a44-88e86f5c86dd"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.650332 5014 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.669997 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "411ad591-8dad-46ef-8a44-88e86f5c86dd" (UID: "411ad591-8dad-46ef-8a44-88e86f5c86dd"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.755161 5014 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/411ad591-8dad-46ef-8a44-88e86f5c86dd-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.893141 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.914261 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.928670 5014 scope.go:117] "RemoveContainer" containerID="a6e535773cd137a5bab0a6870ca512a9e234a397494ec79686683eaaade66b11" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.960309 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-config-data\") pod \"4a3bf50b-cb91-4201-affd-0c42d3585df2\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.960376 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-combined-ca-bundle\") pod \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.960405 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-config-data\") pod \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.960466 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-nova-metadata-tls-certs\") pod \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.960526 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-combined-ca-bundle\") pod \"4a3bf50b-cb91-4201-affd-0c42d3585df2\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.960609 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-scripts\") pod \"4a3bf50b-cb91-4201-affd-0c42d3585df2\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.960648 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkdlm\" (UniqueName: \"kubernetes.io/projected/4a3bf50b-cb91-4201-affd-0c42d3585df2-kube-api-access-gkdlm\") pod \"4a3bf50b-cb91-4201-affd-0c42d3585df2\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.960696 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a3bf50b-cb91-4201-affd-0c42d3585df2-logs\") pod \"4a3bf50b-cb91-4201-affd-0c42d3585df2\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.960717 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpwq4\" (UniqueName: \"kubernetes.io/projected/1dfeacf8-a072-4b44-bed9-618acd31fb6f-kube-api-access-kpwq4\") pod \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.960733 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1dfeacf8-a072-4b44-bed9-618acd31fb6f-logs\") pod \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\" (UID: \"1dfeacf8-a072-4b44-bed9-618acd31fb6f\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.960754 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-public-tls-certs\") pod \"4a3bf50b-cb91-4201-affd-0c42d3585df2\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.960773 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"4a3bf50b-cb91-4201-affd-0c42d3585df2\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.960796 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a3bf50b-cb91-4201-affd-0c42d3585df2-httpd-run\") pod \"4a3bf50b-cb91-4201-affd-0c42d3585df2\" (UID: \"4a3bf50b-cb91-4201-affd-0c42d3585df2\") " Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.962509 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a3bf50b-cb91-4201-affd-0c42d3585df2-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4a3bf50b-cb91-4201-affd-0c42d3585df2" (UID: "4a3bf50b-cb91-4201-affd-0c42d3585df2"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.964640 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1dfeacf8-a072-4b44-bed9-618acd31fb6f-logs" (OuterVolumeSpecName: "logs") pod "1dfeacf8-a072-4b44-bed9-618acd31fb6f" (UID: "1dfeacf8-a072-4b44-bed9-618acd31fb6f"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.965431 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a3bf50b-cb91-4201-affd-0c42d3585df2-logs" (OuterVolumeSpecName: "logs") pod "4a3bf50b-cb91-4201-affd-0c42d3585df2" (UID: "4a3bf50b-cb91-4201-affd-0c42d3585df2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.968217 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.982528 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.983198 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.992408 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1dfeacf8-a072-4b44-bed9-618acd31fb6f-kube-api-access-kpwq4" (OuterVolumeSpecName: "kube-api-access-kpwq4") pod "1dfeacf8-a072-4b44-bed9-618acd31fb6f" (UID: "1dfeacf8-a072-4b44-bed9-618acd31fb6f"). InnerVolumeSpecName "kube-api-access-kpwq4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:08 crc kubenswrapper[5014]: I1006 21:53:08.993116 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-scripts" (OuterVolumeSpecName: "scripts") pod "4a3bf50b-cb91-4201-affd-0c42d3585df2" (UID: "4a3bf50b-cb91-4201-affd-0c42d3585df2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.001961 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.013903 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.203:3000/\": dial tcp 10.217.0.203:3000: connect: connection refused" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.014818 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a3bf50b-cb91-4201-affd-0c42d3585df2-kube-api-access-gkdlm" (OuterVolumeSpecName: "kube-api-access-gkdlm") pod "4a3bf50b-cb91-4201-affd-0c42d3585df2" (UID: "4a3bf50b-cb91-4201-affd-0c42d3585df2"). InnerVolumeSpecName "kube-api-access-gkdlm". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.014855 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "4a3bf50b-cb91-4201-affd-0c42d3585df2" (UID: "4a3bf50b-cb91-4201-affd-0c42d3585df2"). InnerVolumeSpecName "local-storage08-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.017693 5014 scope.go:117] "RemoveContainer" containerID="1ca1df3e861d5336e4515ca03c187e532ee4429553cf2f9930ba7e1d9925c254" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.028678 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbicanb6a2-account-delete-c6b5p"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.031883 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbicanb6a2-account-delete-c6b5p"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.043343 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-config-data" (OuterVolumeSpecName: "config-data") pod "1dfeacf8-a072-4b44-bed9-618acd31fb6f" (UID: "1dfeacf8-a072-4b44-bed9-618acd31fb6f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.060909 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell1b5d1-account-delete-2bzts"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.063321 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"daa5c33b-d941-4030-bf9f-cd6ed831986e\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.063479 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6w6t\" (UniqueName: \"kubernetes.io/projected/daa5c33b-d941-4030-bf9f-cd6ed831986e-kube-api-access-v6w6t\") pod \"daa5c33b-d941-4030-bf9f-cd6ed831986e\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.063940 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-combined-ca-bundle\") pod \"3c1e8f70-227b-40e0-aceb-470eed382180\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.064063 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/daa5c33b-d941-4030-bf9f-cd6ed831986e-logs\") pod \"daa5c33b-d941-4030-bf9f-cd6ed831986e\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.064161 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-config-data\") pod \"3c1e8f70-227b-40e0-aceb-470eed382180\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.064250 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-internal-tls-certs\") pod \"daa5c33b-d941-4030-bf9f-cd6ed831986e\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.064313 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-config-data\") pod \"daa5c33b-d941-4030-bf9f-cd6ed831986e\" 
(UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.064425 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c1e8f70-227b-40e0-aceb-470eed382180-logs\") pod \"3c1e8f70-227b-40e0-aceb-470eed382180\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.064519 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-public-tls-certs\") pod \"3c1e8f70-227b-40e0-aceb-470eed382180\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.064730 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhkvp\" (UniqueName: \"kubernetes.io/projected/3c1e8f70-227b-40e0-aceb-470eed382180-kube-api-access-vhkvp\") pod \"3c1e8f70-227b-40e0-aceb-470eed382180\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.064810 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-internal-tls-certs\") pod \"3c1e8f70-227b-40e0-aceb-470eed382180\" (UID: \"3c1e8f70-227b-40e0-aceb-470eed382180\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.064895 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-combined-ca-bundle\") pod \"daa5c33b-d941-4030-bf9f-cd6ed831986e\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.064977 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/daa5c33b-d941-4030-bf9f-cd6ed831986e-httpd-run\") pod \"daa5c33b-d941-4030-bf9f-cd6ed831986e\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.065052 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-scripts\") pod \"daa5c33b-d941-4030-bf9f-cd6ed831986e\" (UID: \"daa5c33b-d941-4030-bf9f-cd6ed831986e\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.065524 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.071953 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkdlm\" (UniqueName: \"kubernetes.io/projected/4a3bf50b-cb91-4201-affd-0c42d3585df2-kube-api-access-gkdlm\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.072043 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a3bf50b-cb91-4201-affd-0c42d3585df2-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.072115 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpwq4\" (UniqueName: \"kubernetes.io/projected/1dfeacf8-a072-4b44-bed9-618acd31fb6f-kube-api-access-kpwq4\") on node \"crc\" 
DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.072172 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1dfeacf8-a072-4b44-bed9-618acd31fb6f-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.072309 5014 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.072385 5014 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a3bf50b-cb91-4201-affd-0c42d3585df2-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.072456 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.064542 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/daa5c33b-d941-4030-bf9f-cd6ed831986e-logs" (OuterVolumeSpecName: "logs") pod "daa5c33b-d941-4030-bf9f-cd6ed831986e" (UID: "daa5c33b-d941-4030-bf9f-cd6ed831986e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.066082 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c1e8f70-227b-40e0-aceb-470eed382180-logs" (OuterVolumeSpecName: "logs") pod "3c1e8f70-227b-40e0-aceb-470eed382180" (UID: "3c1e8f70-227b-40e0-aceb-470eed382180"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.066938 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/daa5c33b-d941-4030-bf9f-cd6ed831986e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "daa5c33b-d941-4030-bf9f-cd6ed831986e" (UID: "daa5c33b-d941-4030-bf9f-cd6ed831986e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.071809 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-scripts" (OuterVolumeSpecName: "scripts") pod "daa5c33b-d941-4030-bf9f-cd6ed831986e" (UID: "daa5c33b-d941-4030-bf9f-cd6ed831986e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.072106 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/daa5c33b-d941-4030-bf9f-cd6ed831986e-kube-api-access-v6w6t" (OuterVolumeSpecName: "kube-api-access-v6w6t") pod "daa5c33b-d941-4030-bf9f-cd6ed831986e" (UID: "daa5c33b-d941-4030-bf9f-cd6ed831986e"). InnerVolumeSpecName "kube-api-access-v6w6t". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.072178 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1dfeacf8-a072-4b44-bed9-618acd31fb6f" (UID: "1dfeacf8-a072-4b44-bed9-618acd31fb6f"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.072173 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.067710 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell1b5d1-account-delete-2bzts"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.100281 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "daa5c33b-d941-4030-bf9f-cd6ed831986e" (UID: "daa5c33b-d941-4030-bf9f-cd6ed831986e"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.101037 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement8e33-account-delete-g7pdl"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.101087 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement8e33-account-delete-g7pdl"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.117494 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c1e8f70-227b-40e0-aceb-470eed382180-kube-api-access-vhkvp" (OuterVolumeSpecName: "kube-api-access-vhkvp") pod "3c1e8f70-227b-40e0-aceb-470eed382180" (UID: "3c1e8f70-227b-40e0-aceb-470eed382180"). InnerVolumeSpecName "kube-api-access-vhkvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.130771 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.137216 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-f4ddcc578-kbrhw"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.143416 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4a3bf50b-cb91-4201-affd-0c42d3585df2" (UID: "4a3bf50b-cb91-4201-affd-0c42d3585df2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.155065 5014 scope.go:117] "RemoveContainer" containerID="6437ea266193af7f5429b5e22c496e67c18b4a8bbe1772f6583ef433e1d820e1" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.165871 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-f4ddcc578-kbrhw"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.177000 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pml5d\" (UniqueName: \"kubernetes.io/projected/0bd07ab2-973f-4531-8e5f-68d349e231b4-kube-api-access-pml5d\") pod \"0bd07ab2-973f-4531-8e5f-68d349e231b4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.177080 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-combined-ca-bundle\") pod \"0bd07ab2-973f-4531-8e5f-68d349e231b4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.177197 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-config-data\") pod \"0bd07ab2-973f-4531-8e5f-68d349e231b4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.177225 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd16c866-91b0-4261-a084-7a96ac597c04-combined-ca-bundle\") pod \"cd16c866-91b0-4261-a084-7a96ac597c04\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.177255 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-config-data-custom\") pod \"0bd07ab2-973f-4531-8e5f-68d349e231b4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.177308 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4vcq\" (UniqueName: \"kubernetes.io/projected/cd16c866-91b0-4261-a084-7a96ac597c04-kube-api-access-g4vcq\") pod \"cd16c866-91b0-4261-a084-7a96ac597c04\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.177347 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd07ab2-973f-4531-8e5f-68d349e231b4-logs\") pod \"0bd07ab2-973f-4531-8e5f-68d349e231b4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.177378 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-internal-tls-certs\") pod \"0bd07ab2-973f-4531-8e5f-68d349e231b4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.177403 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cd16c866-91b0-4261-a084-7a96ac597c04-kolla-config\") pod \"cd16c866-91b0-4261-a084-7a96ac597c04\" (UID: 
\"cd16c866-91b0-4261-a084-7a96ac597c04\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.177498 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-public-tls-certs\") pod \"0bd07ab2-973f-4531-8e5f-68d349e231b4\" (UID: \"0bd07ab2-973f-4531-8e5f-68d349e231b4\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.177549 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cd16c866-91b0-4261-a084-7a96ac597c04-config-data\") pod \"cd16c866-91b0-4261-a084-7a96ac597c04\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.177582 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd16c866-91b0-4261-a084-7a96ac597c04-memcached-tls-certs\") pod \"cd16c866-91b0-4261-a084-7a96ac597c04\" (UID: \"cd16c866-91b0-4261-a084-7a96ac597c04\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.178015 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.178042 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhkvp\" (UniqueName: \"kubernetes.io/projected/3c1e8f70-227b-40e0-aceb-470eed382180-kube-api-access-vhkvp\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.178058 5014 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/daa5c33b-d941-4030-bf9f-cd6ed831986e-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.178069 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.178082 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.178108 5014 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.178121 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6w6t\" (UniqueName: \"kubernetes.io/projected/daa5c33b-d941-4030-bf9f-cd6ed831986e-kube-api-access-v6w6t\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.178132 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/daa5c33b-d941-4030-bf9f-cd6ed831986e-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.178143 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c1e8f70-227b-40e0-aceb-470eed382180-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.179946 5014 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder3af6-account-delete-44b4q"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.181904 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd16c866-91b0-4261-a084-7a96ac597c04-config-data" (OuterVolumeSpecName: "config-data") pod "cd16c866-91b0-4261-a084-7a96ac597c04" (UID: "cd16c866-91b0-4261-a084-7a96ac597c04"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.182292 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0bd07ab2-973f-4531-8e5f-68d349e231b4-logs" (OuterVolumeSpecName: "logs") pod "0bd07ab2-973f-4531-8e5f-68d349e231b4" (UID: "0bd07ab2-973f-4531-8e5f-68d349e231b4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.186502 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd16c866-91b0-4261-a084-7a96ac597c04-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "cd16c866-91b0-4261-a084-7a96ac597c04" (UID: "cd16c866-91b0-4261-a084-7a96ac597c04"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.190435 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder3af6-account-delete-44b4q"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.206868 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd16c866-91b0-4261-a084-7a96ac597c04-kube-api-access-g4vcq" (OuterVolumeSpecName: "kube-api-access-g4vcq") pod "cd16c866-91b0-4261-a084-7a96ac597c04" (UID: "cd16c866-91b0-4261-a084-7a96ac597c04"). InnerVolumeSpecName "kube-api-access-g4vcq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.207465 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance6d2b-account-delete-n47pm"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.214845 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0bd07ab2-973f-4531-8e5f-68d349e231b4" (UID: "0bd07ab2-973f-4531-8e5f-68d349e231b4"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.218945 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "1dfeacf8-a072-4b44-bed9-618acd31fb6f" (UID: "1dfeacf8-a072-4b44-bed9-618acd31fb6f"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.219115 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bd07ab2-973f-4531-8e5f-68d349e231b4-kube-api-access-pml5d" (OuterVolumeSpecName: "kube-api-access-pml5d") pod "0bd07ab2-973f-4531-8e5f-68d349e231b4" (UID: "0bd07ab2-973f-4531-8e5f-68d349e231b4"). InnerVolumeSpecName "kube-api-access-pml5d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.222224 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance6d2b-account-delete-n47pm"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.229163 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.244829 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3c1e8f70-227b-40e0-aceb-470eed382180" (UID: "3c1e8f70-227b-40e0-aceb-470eed382180"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.245260 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.249313 5014 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.252755 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapi4b16-account-delete-jhqrl"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.268340 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novaapi4b16-account-delete-jhqrl"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.268375 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-config-data" (OuterVolumeSpecName: "config-data") pod "4a3bf50b-cb91-4201-affd-0c42d3585df2" (UID: "4a3bf50b-cb91-4201-affd-0c42d3585df2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.281675 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cd16c866-91b0-4261-a084-7a96ac597c04-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.281761 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pml5d\" (UniqueName: \"kubernetes.io/projected/0bd07ab2-973f-4531-8e5f-68d349e231b4-kube-api-access-pml5d\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.281776 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.281790 5014 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.281863 5014 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.281872 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4vcq\" (UniqueName: \"kubernetes.io/projected/cd16c866-91b0-4261-a084-7a96ac597c04-kube-api-access-g4vcq\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.281905 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd07ab2-973f-4531-8e5f-68d349e231b4-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.281918 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.281926 5014 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cd16c866-91b0-4261-a084-7a96ac597c04-kolla-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.281935 5014 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dfeacf8-a072-4b44-bed9-618acd31fb6f-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.289719 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.295087 5014 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.320632 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "daa5c33b-d941-4030-bf9f-cd6ed831986e" (UID: "daa5c33b-d941-4030-bf9f-cd6ed831986e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.330964 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.346327 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-config-data" (OuterVolumeSpecName: "config-data") pod "3c1e8f70-227b-40e0-aceb-470eed382180" (UID: "3c1e8f70-227b-40e0-aceb-470eed382180"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.354327 5014 generic.go:334] "Generic (PLEG): container finished" podID="2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" containerID="40dcdb5d7c50cb4d2ec51f4ef5316c1e1fba2901ad8cf7db90116b6c873b152a" exitCode=0 Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.354417 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd","Type":"ContainerDied","Data":"40dcdb5d7c50cb4d2ec51f4ef5316c1e1fba2901ad8cf7db90116b6c873b152a"} Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.354452 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd","Type":"ContainerDied","Data":"1b002e4dd7c369d9795a24bfa89f81639f0653181764f6199e6579c4ea3af0e3"} Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.354470 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b002e4dd7c369d9795a24bfa89f81639f0653181764f6199e6579c4ea3af0e3" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.366446 5014 generic.go:334] "Generic (PLEG): container finished" podID="13fe806f-0e4e-4ea7-838b-938c5fe74c99" containerID="dec3beff6abbdcf32f4d602873fe4ab229755aef532b1c3308ed69b08438e50e" exitCode=0 Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.366540 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-697f765b44-77s6g" event={"ID":"13fe806f-0e4e-4ea7-838b-938c5fe74c99","Type":"ContainerDied","Data":"dec3beff6abbdcf32f4d602873fe4ab229755aef532b1c3308ed69b08438e50e"} Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.371062 5014 generic.go:334] "Generic (PLEG): container finished" podID="cd16c866-91b0-4261-a084-7a96ac597c04" containerID="cdc30334797cf0d0a1abb5ab3b3e87dbac8996fc837919c9242d311431f403b2" exitCode=0 Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.371134 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"cd16c866-91b0-4261-a084-7a96ac597c04","Type":"ContainerDied","Data":"cdc30334797cf0d0a1abb5ab3b3e87dbac8996fc837919c9242d311431f403b2"} Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.371163 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"cd16c866-91b0-4261-a084-7a96ac597c04","Type":"ContainerDied","Data":"020ebec827fa632394d91894455ff61a17986eaf8dbf508e23037a819940b99f"} Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.371238 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.378179 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4a3bf50b-cb91-4201-affd-0c42d3585df2","Type":"ContainerDied","Data":"aeb166394dfde50d2fd4ec25716d20aa19a01c64160be32d3a395f08beb79724"} Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.378294 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.387698 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.387727 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.387739 5014 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.400112 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-config-data" (OuterVolumeSpecName: "config-data") pod "daa5c33b-d941-4030-bf9f-cd6ed831986e" (UID: "daa5c33b-d941-4030-bf9f-cd6ed831986e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.408439 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1dfeacf8-a072-4b44-bed9-618acd31fb6f","Type":"ContainerDied","Data":"73bb5c470d731091eacea54b1e2fbb3b855ee9d6a47fd0909ea739c6ea8256af"} Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.408560 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.410281 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "daa5c33b-d941-4030-bf9f-cd6ed831986e" (UID: "daa5c33b-d941-4030-bf9f-cd6ed831986e"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.413583 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3c1e8f70-227b-40e0-aceb-470eed382180","Type":"ContainerDied","Data":"b53b6ef9cb749992ee048456f7510b1eed0642e98177c348d1062f39223aeac6"} Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.413687 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.416011 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3c1e8f70-227b-40e0-aceb-470eed382180" (UID: "3c1e8f70-227b-40e0-aceb-470eed382180"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.421907 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0bd07ab2-973f-4531-8e5f-68d349e231b4" (UID: "0bd07ab2-973f-4531-8e5f-68d349e231b4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.431936 5014 generic.go:334] "Generic (PLEG): container finished" podID="56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" containerID="6e30f54354b4e05d72cd38208e8b8588c2363ab84617b170457752ebb404a286" exitCode=0 Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.432043 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-849cf44bc5-9qnb4" event={"ID":"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00","Type":"ContainerDied","Data":"6e30f54354b4e05d72cd38208e8b8588c2363ab84617b170457752ebb404a286"} Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.439375 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4a3bf50b-cb91-4201-affd-0c42d3585df2" (UID: "4a3bf50b-cb91-4201-affd-0c42d3585df2"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: E1006 21:53:09.445730 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 06 21:53:09 crc kubenswrapper[5014]: E1006 21:53:09.452716 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.452887 5014 generic.go:334] "Generic (PLEG): container finished" podID="0bd07ab2-973f-4531-8e5f-68d349e231b4" containerID="179498d178eaf12fad9776b3020c6b558f9bd713b5bcdcb7b15c957545542e01" exitCode=0 Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.453059 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-598975567d-rtcs4" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.453209 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-598975567d-rtcs4" event={"ID":"0bd07ab2-973f-4531-8e5f-68d349e231b4","Type":"ContainerDied","Data":"179498d178eaf12fad9776b3020c6b558f9bd713b5bcdcb7b15c957545542e01"} Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.453258 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-598975567d-rtcs4" event={"ID":"0bd07ab2-973f-4531-8e5f-68d349e231b4","Type":"ContainerDied","Data":"54b318fe0eb02a773a8a8e3f52da3411fd766c297175440d422e0326e5e4ff38"} Oct 06 21:53:09 crc kubenswrapper[5014]: E1006 21:53:09.461033 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 06 21:53:09 crc kubenswrapper[5014]: E1006 21:53:09.461100 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="cefbd6b6-3aa3-459d-9f8d-060736f4de92" containerName="nova-scheduler-scheduler" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.469213 5014 generic.go:334] "Generic (PLEG): container finished" podID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerID="d23dd50b826bbf964b1ff4d5d05a5097816f29157fbba7ced1c970a4eec16c3b" exitCode=0 Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.469314 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"32e89ff3-0d60-4fd1-9c0d-831b92311165","Type":"ContainerDied","Data":"d23dd50b826bbf964b1ff4d5d05a5097816f29157fbba7ced1c970a4eec16c3b"} Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.475510 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"daa5c33b-d941-4030-bf9f-cd6ed831986e","Type":"ContainerDied","Data":"7590737cacad4c6041ad9e0a8ef5ddc26a5bc9cf67233c4f1aba46d2b82548f6"} Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.475680 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.477478 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3c1e8f70-227b-40e0-aceb-470eed382180" (UID: "3c1e8f70-227b-40e0-aceb-470eed382180"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.490253 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd16c866-91b0-4261-a084-7a96ac597c04-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "cd16c866-91b0-4261-a084-7a96ac597c04" (UID: "cd16c866-91b0-4261-a084-7a96ac597c04"). InnerVolumeSpecName "memcached-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.492990 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0bd07ab2-973f-4531-8e5f-68d349e231b4" (UID: "0bd07ab2-973f-4531-8e5f-68d349e231b4"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.495333 5014 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.495363 5014 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a3bf50b-cb91-4201-affd-0c42d3585df2-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.495374 5014 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.495383 5014 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c1e8f70-227b-40e0-aceb-470eed382180-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.495395 5014 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd16c866-91b0-4261-a084-7a96ac597c04-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.495405 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.495414 5014 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.495423 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/daa5c33b-d941-4030-bf9f-cd6ed831986e-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.507826 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "0bd07ab2-973f-4531-8e5f-68d349e231b4" (UID: "0bd07ab2-973f-4531-8e5f-68d349e231b4"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.510112 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd16c866-91b0-4261-a084-7a96ac597c04-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cd16c866-91b0-4261-a084-7a96ac597c04" (UID: "cd16c866-91b0-4261-a084-7a96ac597c04"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.512971 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-config-data" (OuterVolumeSpecName: "config-data") pod "0bd07ab2-973f-4531-8e5f-68d349e231b4" (UID: "0bd07ab2-973f-4531-8e5f-68d349e231b4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.522116 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="044b7af9-01a7-40c1-803c-30e568aaf1fe" path="/var/lib/kubelet/pods/044b7af9-01a7-40c1-803c-30e568aaf1fe/volumes" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.526478 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06e8dc30-8c95-4585-82e0-fc82de286a1c" path="/var/lib/kubelet/pods/06e8dc30-8c95-4585-82e0-fc82de286a1c/volumes" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.532003 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24c4cd4c-297a-45d8-ad6f-e24e53736ecc" path="/var/lib/kubelet/pods/24c4cd4c-297a-45d8-ad6f-e24e53736ecc/volumes" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.533571 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d91cde4-29d5-4947-b2ee-73e29ac244c2" path="/var/lib/kubelet/pods/2d91cde4-29d5-4947-b2ee-73e29ac244c2/volumes" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.537187 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b18812d-9eec-4254-8633-b40f55244e47" path="/var/lib/kubelet/pods/3b18812d-9eec-4254-8633-b40f55244e47/volumes" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.566520 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="411ad591-8dad-46ef-8a44-88e86f5c86dd" path="/var/lib/kubelet/pods/411ad591-8dad-46ef-8a44-88e86f5c86dd/volumes" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.579547 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c2ecb8-63d7-4275-97ff-7aa899707212" path="/var/lib/kubelet/pods/49c2ecb8-63d7-4275-97ff-7aa899707212/volumes" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.582367 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c301c8b-acb9-4008-9832-ce83dc524b6d" path="/var/lib/kubelet/pods/6c301c8b-acb9-4008-9832-ce83dc524b6d/volumes" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.582885 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ca96007-c66c-4e95-84b7-12eff893cca2" path="/var/lib/kubelet/pods/6ca96007-c66c-4e95-84b7-12eff893cca2/volumes" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.583372 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c" path="/var/lib/kubelet/pods/c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c/volumes" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.583800 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349" path="/var/lib/kubelet/pods/d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349/volumes" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.610161 5014 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 
21:53:09.610201 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd07ab2-973f-4531-8e5f-68d349e231b4-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.610213 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd16c866-91b0-4261-a084-7a96ac597c04-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:09 crc kubenswrapper[5014]: E1006 21:53:09.711865 5014 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Oct 06 21:53:09 crc kubenswrapper[5014]: E1006 21:53:09.712168 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data podName:c8f59d7d-f71b-46b0-bd32-476a2517a3b6 nodeName:}" failed. No retries permitted until 2025-10-06 21:53:17.712150895 +0000 UTC m=+1343.005187629 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data") pod "rabbitmq-cell1-server-0" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6") : configmap "rabbitmq-cell1-config-data" not found Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.829681 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.840801 5014 scope.go:117] "RemoveContainer" containerID="8535cd122ecdef14d55810af72e50b213cf4fd419bc7559f9484489bc8b3b735" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.851888 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.884362 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.903521 5014 scope.go:117] "RemoveContainer" containerID="ee04d15145622330280112d33fa03759b3233a0f8fba1afae6789c4d4d814cb6" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.919647 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-config-data-default\") pod \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.919683 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-config-data\") pod \"32e89ff3-0d60-4fd1-9c0d-831b92311165\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.919699 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-scripts\") pod \"32e89ff3-0d60-4fd1-9c0d-831b92311165\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.919714 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.919734 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkqk2\" (UniqueName: \"kubernetes.io/projected/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-kube-api-access-lkqk2\") pod \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.919786 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-combined-ca-bundle\") pod \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.919805 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-galera-tls-certs\") pod \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.919820 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-secrets\") pod \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.919843 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-sg-core-conf-yaml\") pod \"32e89ff3-0d60-4fd1-9c0d-831b92311165\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.919863 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/32e89ff3-0d60-4fd1-9c0d-831b92311165-run-httpd\") pod \"32e89ff3-0d60-4fd1-9c0d-831b92311165\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.919907 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-combined-ca-bundle\") pod \"32e89ff3-0d60-4fd1-9c0d-831b92311165\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.919933 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-ceilometer-tls-certs\") pod \"32e89ff3-0d60-4fd1-9c0d-831b92311165\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.919965 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4q6b2\" (UniqueName: \"kubernetes.io/projected/32e89ff3-0d60-4fd1-9c0d-831b92311165-kube-api-access-4q6b2\") pod \"32e89ff3-0d60-4fd1-9c0d-831b92311165\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.919986 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-config-data-generated\") pod \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.920020 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/32e89ff3-0d60-4fd1-9c0d-831b92311165-log-httpd\") pod \"32e89ff3-0d60-4fd1-9c0d-831b92311165\" (UID: \"32e89ff3-0d60-4fd1-9c0d-831b92311165\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.920059 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-kolla-config\") pod \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.920076 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-operator-scripts\") pod \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\" (UID: \"2439cd4a-a6a0-4b85-b4d5-3952b61af5bd\") " Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.920910 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" (UID: "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.922838 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32e89ff3-0d60-4fd1-9c0d-831b92311165-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "32e89ff3-0d60-4fd1-9c0d-831b92311165" (UID: "32e89ff3-0d60-4fd1-9c0d-831b92311165"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.923299 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" (UID: "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.923658 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" (UID: "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.925554 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.926201 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32e89ff3-0d60-4fd1-9c0d-831b92311165-kube-api-access-4q6b2" (OuterVolumeSpecName: "kube-api-access-4q6b2") pod "32e89ff3-0d60-4fd1-9c0d-831b92311165" (UID: "32e89ff3-0d60-4fd1-9c0d-831b92311165"). InnerVolumeSpecName "kube-api-access-4q6b2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.926516 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-secrets" (OuterVolumeSpecName: "secrets") pod "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" (UID: "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd"). InnerVolumeSpecName "secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.928522 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.928877 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32e89ff3-0d60-4fd1-9c0d-831b92311165-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "32e89ff3-0d60-4fd1-9c0d-831b92311165" (UID: "32e89ff3-0d60-4fd1-9c0d-831b92311165"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.931210 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-kube-api-access-lkqk2" (OuterVolumeSpecName: "kube-api-access-lkqk2") pod "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" (UID: "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd"). InnerVolumeSpecName "kube-api-access-lkqk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.933540 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-scripts" (OuterVolumeSpecName: "scripts") pod "32e89ff3-0d60-4fd1-9c0d-831b92311165" (UID: "32e89ff3-0d60-4fd1-9c0d-831b92311165"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.933809 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.923154 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" (UID: "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.941091 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "mysql-db") pod "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" (UID: "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.943776 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.952740 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "32e89ff3-0d60-4fd1-9c0d-831b92311165" (UID: "32e89ff3-0d60-4fd1-9c0d-831b92311165"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.957988 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-598975567d-rtcs4"] Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.959530 5014 scope.go:117] "RemoveContainer" containerID="9baa658cf1475d9f5420e5c02783c38281b57faef5be3f69ab55c29aa8529137" Oct 06 21:53:09 crc kubenswrapper[5014]: I1006 21:53:09.985319 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-598975567d-rtcs4"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.001638 5014 scope.go:117] "RemoveContainer" containerID="e8673788407b2a922e671f5a29648d8cec35c573c85be82d267e82064c4a8203" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.011786 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021128 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-config-data\") pod \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021165 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-combined-ca-bundle\") pod \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021232 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-combined-ca-bundle\") pod 
\"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021256 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-combined-ca-bundle\") pod \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021279 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hz964\" (UniqueName: \"kubernetes.io/projected/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-kube-api-access-hz964\") pod \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021296 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6d7x\" (UniqueName: \"kubernetes.io/projected/13fe806f-0e4e-4ea7-838b-938c5fe74c99-kube-api-access-k6d7x\") pod \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021313 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13fe806f-0e4e-4ea7-838b-938c5fe74c99-logs\") pod \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021405 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-scripts\") pod \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021427 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-config-data-custom\") pod \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\" (UID: \"13fe806f-0e4e-4ea7-838b-938c5fe74c99\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021455 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-logs\") pod \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021502 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-config-data-custom\") pod \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021541 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-config-data\") pod \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021559 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mjqj\" (UniqueName: \"kubernetes.io/projected/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-kube-api-access-6mjqj\") pod 
\"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021578 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-etc-machine-id\") pod \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021600 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-config-data\") pod \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\" (UID: \"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021637 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-config-data-custom\") pod \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\" (UID: \"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021928 5014 reconciler_common.go:293] "Volume detached for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-secrets\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021944 5014 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021955 5014 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/32e89ff3-0d60-4fd1-9c0d-831b92311165-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021963 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4q6b2\" (UniqueName: \"kubernetes.io/projected/32e89ff3-0d60-4fd1-9c0d-831b92311165-kube-api-access-4q6b2\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021972 5014 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-config-data-generated\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021980 5014 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/32e89ff3-0d60-4fd1-9c0d-831b92311165-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021989 5014 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-kolla-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.021996 5014 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-operator-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.022005 5014 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-config-data-default\") on node \"crc\" 
DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.022013 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.022030 5014 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.022040 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkqk2\" (UniqueName: \"kubernetes.io/projected/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-kube-api-access-lkqk2\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.024659 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.024704 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" (UID: "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.025265 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-logs" (OuterVolumeSpecName: "logs") pod "56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" (UID: "56ddac83-2b9f-4a7d-a9c9-02de44e0ab00"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.026347 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13fe806f-0e4e-4ea7-838b-938c5fe74c99-logs" (OuterVolumeSpecName: "logs") pod "13fe806f-0e4e-4ea7-838b-938c5fe74c99" (UID: "13fe806f-0e4e-4ea7-838b-938c5fe74c99"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.026706 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" (UID: "02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.034720 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.039950 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-kube-api-access-6mjqj" (OuterVolumeSpecName: "kube-api-access-6mjqj") pod "02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" (UID: "02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf"). InnerVolumeSpecName "kube-api-access-6mjqj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.041942 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.047719 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.053959 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.059512 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.061736 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-kube-api-access-hz964" (OuterVolumeSpecName: "kube-api-access-hz964") pod "56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" (UID: "56ddac83-2b9f-4a7d-a9c9-02de44e0ab00"). InnerVolumeSpecName "kube-api-access-hz964". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.064367 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.066168 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" (UID: "56ddac83-2b9f-4a7d-a9c9-02de44e0ab00"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.067376 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "32e89ff3-0d60-4fd1-9c0d-831b92311165" (UID: "32e89ff3-0d60-4fd1-9c0d-831b92311165"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.067677 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "13fe806f-0e4e-4ea7-838b-938c5fe74c99" (UID: "13fe806f-0e4e-4ea7-838b-938c5fe74c99"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.072812 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" (UID: "02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.072924 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-scripts" (OuterVolumeSpecName: "scripts") pod "02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" (UID: "02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.073589 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13fe806f-0e4e-4ea7-838b-938c5fe74c99-kube-api-access-k6d7x" (OuterVolumeSpecName: "kube-api-access-k6d7x") pod "13fe806f-0e4e-4ea7-838b-938c5fe74c99" (UID: "13fe806f-0e4e-4ea7-838b-938c5fe74c99"). InnerVolumeSpecName "kube-api-access-k6d7x". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.104070 5014 scope.go:117] "RemoveContainer" containerID="9e1f60c39c5b159a972961d88da6cb37576b5e416b62ae8289bdf2915b1ab1f7" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.104259 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" (UID: "2439cd4a-a6a0-4b85-b4d5-3952b61af5bd"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.108813 5014 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.123276 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6d7x\" (UniqueName: \"kubernetes.io/projected/13fe806f-0e4e-4ea7-838b-938c5fe74c99-kube-api-access-k6d7x\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.123554 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13fe806f-0e4e-4ea7-838b-938c5fe74c99-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.123565 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.123573 5014 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.123582 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-logs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.123591 5014 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.123602 5014 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.123611 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mjqj\" (UniqueName: \"kubernetes.io/projected/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-kube-api-access-6mjqj\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.123631 5014 
reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.123639 5014 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.123649 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.123659 5014 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.123667 5014 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.123675 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hz964\" (UniqueName: \"kubernetes.io/projected/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-kube-api-access-hz964\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.123733 5014 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.123891 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data podName:4b977fc8-6c11-41e6-9500-f0da2d66aea1 nodeName:}" failed. No retries permitted until 2025-10-06 21:53:18.123873512 +0000 UTC m=+1343.416910246 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data") pod "rabbitmq-server-0" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1") : configmap "rabbitmq-config-data" not found Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.125102 5014 scope.go:117] "RemoveContainer" containerID="e8673788407b2a922e671f5a29648d8cec35c573c85be82d267e82064c4a8203" Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.126673 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8673788407b2a922e671f5a29648d8cec35c573c85be82d267e82064c4a8203\": container with ID starting with e8673788407b2a922e671f5a29648d8cec35c573c85be82d267e82064c4a8203 not found: ID does not exist" containerID="e8673788407b2a922e671f5a29648d8cec35c573c85be82d267e82064c4a8203" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.126710 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8673788407b2a922e671f5a29648d8cec35c573c85be82d267e82064c4a8203"} err="failed to get container status \"e8673788407b2a922e671f5a29648d8cec35c573c85be82d267e82064c4a8203\": rpc error: code = NotFound desc = could not find container \"e8673788407b2a922e671f5a29648d8cec35c573c85be82d267e82064c4a8203\": container with ID starting with e8673788407b2a922e671f5a29648d8cec35c573c85be82d267e82064c4a8203 not found: ID does not exist" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.126728 5014 scope.go:117] "RemoveContainer" containerID="9e1f60c39c5b159a972961d88da6cb37576b5e416b62ae8289bdf2915b1ab1f7" Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.127146 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e1f60c39c5b159a972961d88da6cb37576b5e416b62ae8289bdf2915b1ab1f7\": container with ID starting with 9e1f60c39c5b159a972961d88da6cb37576b5e416b62ae8289bdf2915b1ab1f7 not found: ID does not exist" containerID="9e1f60c39c5b159a972961d88da6cb37576b5e416b62ae8289bdf2915b1ab1f7" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.127164 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e1f60c39c5b159a972961d88da6cb37576b5e416b62ae8289bdf2915b1ab1f7"} err="failed to get container status \"9e1f60c39c5b159a972961d88da6cb37576b5e416b62ae8289bdf2915b1ab1f7\": rpc error: code = NotFound desc = could not find container \"9e1f60c39c5b159a972961d88da6cb37576b5e416b62ae8289bdf2915b1ab1f7\": container with ID starting with 9e1f60c39c5b159a972961d88da6cb37576b5e416b62ae8289bdf2915b1ab1f7 not found: ID does not exist" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.127175 5014 scope.go:117] "RemoveContainer" containerID="c486ab912c7417dff548ef9db60b7c8a203254071119a11ef86f79a80452130d" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.129896 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-config-data" (OuterVolumeSpecName: "config-data") pod "32e89ff3-0d60-4fd1-9c0d-831b92311165" (UID: "32e89ff3-0d60-4fd1-9c0d-831b92311165"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.149222 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "32e89ff3-0d60-4fd1-9c0d-831b92311165" (UID: "32e89ff3-0d60-4fd1-9c0d-831b92311165"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.152703 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a5aaa8e6da3bb1475013fcf9505c94fb4f157784a202b67b1fafdd6706d374db" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.154593 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a5aaa8e6da3bb1475013fcf9505c94fb4f157784a202b67b1fafdd6706d374db" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.160395 5014 scope.go:117] "RemoveContainer" containerID="a4c0aa6211dfec0a5eda7b347326298f80376112ca717764da53c00f048df69b" Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.160501 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a5aaa8e6da3bb1475013fcf9505c94fb4f157784a202b67b1fafdd6706d374db" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.160548 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" containerName="ovn-northd" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.168604 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "13fe806f-0e4e-4ea7-838b-938c5fe74c99" (UID: "13fe806f-0e4e-4ea7-838b-938c5fe74c99"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.169882 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.189930 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" (UID: "56ddac83-2b9f-4a7d-a9c9-02de44e0ab00"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.199929 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" (UID: "02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.204770 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-config-data" (OuterVolumeSpecName: "config-data") pod "13fe806f-0e4e-4ea7-838b-938c5fe74c99" (UID: "13fe806f-0e4e-4ea7-838b-938c5fe74c99"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.220182 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-config-data" (OuterVolumeSpecName: "config-data") pod "56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" (UID: "56ddac83-2b9f-4a7d-a9c9-02de44e0ab00"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.224588 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwckt\" (UniqueName: \"kubernetes.io/projected/cefbd6b6-3aa3-459d-9f8d-060736f4de92-kube-api-access-jwckt\") pod \"cefbd6b6-3aa3-459d-9f8d-060736f4de92\" (UID: \"cefbd6b6-3aa3-459d-9f8d-060736f4de92\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.224771 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cefbd6b6-3aa3-459d-9f8d-060736f4de92-config-data\") pod \"cefbd6b6-3aa3-459d-9f8d-060736f4de92\" (UID: \"cefbd6b6-3aa3-459d-9f8d-060736f4de92\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.224967 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cefbd6b6-3aa3-459d-9f8d-060736f4de92-combined-ca-bundle\") pod \"cefbd6b6-3aa3-459d-9f8d-060736f4de92\" (UID: \"cefbd6b6-3aa3-459d-9f8d-060736f4de92\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.225455 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.225480 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.225492 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.225504 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.225520 5014 
reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32e89ff3-0d60-4fd1-9c0d-831b92311165-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.225531 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.225542 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13fe806f-0e4e-4ea7-838b-938c5fe74c99-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.229602 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cefbd6b6-3aa3-459d-9f8d-060736f4de92-kube-api-access-jwckt" (OuterVolumeSpecName: "kube-api-access-jwckt") pod "cefbd6b6-3aa3-459d-9f8d-060736f4de92" (UID: "cefbd6b6-3aa3-459d-9f8d-060736f4de92"). InnerVolumeSpecName "kube-api-access-jwckt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.233963 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.236795 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.238551 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.238647 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="7de90fe7-747a-4334-be2a-d3b5ee6b8148" containerName="nova-cell1-conductor-conductor" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.270543 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cefbd6b6-3aa3-459d-9f8d-060736f4de92-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cefbd6b6-3aa3-459d-9f8d-060736f4de92" (UID: "cefbd6b6-3aa3-459d-9f8d-060736f4de92"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.280818 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cefbd6b6-3aa3-459d-9f8d-060736f4de92-config-data" (OuterVolumeSpecName: "config-data") pod "cefbd6b6-3aa3-459d-9f8d-060736f4de92" (UID: "cefbd6b6-3aa3-459d-9f8d-060736f4de92"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.318778 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-config-data" (OuterVolumeSpecName: "config-data") pod "02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" (UID: "02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.327651 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cefbd6b6-3aa3-459d-9f8d-060736f4de92-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.327687 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.327700 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cefbd6b6-3aa3-459d-9f8d-060736f4de92-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.327713 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwckt\" (UniqueName: \"kubernetes.io/projected/cefbd6b6-3aa3-459d-9f8d-060736f4de92-kube-api-access-jwckt\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.496015 5014 generic.go:334] "Generic (PLEG): container finished" podID="fccac7f9-eeaa-4481-ab49-9e71dd8af79c" containerID="366d9c7d12dc53287d03434a10b7448a9c6d02142fd4ec78034d67e5b49d8e4b" exitCode=0 Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.496112 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-78bf4bbdb7-6fpl9" event={"ID":"fccac7f9-eeaa-4481-ab49-9e71dd8af79c","Type":"ContainerDied","Data":"366d9c7d12dc53287d03434a10b7448a9c6d02142fd4ec78034d67e5b49d8e4b"} Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.496152 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-78bf4bbdb7-6fpl9" event={"ID":"fccac7f9-eeaa-4481-ab49-9e71dd8af79c","Type":"ContainerDied","Data":"5b9550d1305528ae1b51d93dfd6bb36724b1d7e88e1bee75f3e54ad303dd2119"} Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.496171 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b9550d1305528ae1b51d93dfd6bb36724b1d7e88e1bee75f3e54ad303dd2119" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.499518 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"32e89ff3-0d60-4fd1-9c0d-831b92311165","Type":"ContainerDied","Data":"cef1aa613c35b40230a30282b50b0b6a417cd2dc5f522cbf08e2b5a696a5278b"} Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.499637 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.513056 5014 generic.go:334] "Generic (PLEG): container finished" podID="02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" containerID="8874248b35b3044ef165fdc3593e8e2e389f8fc195b0aace5c1260fbf7aea6e6" exitCode=0 Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.513141 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf","Type":"ContainerDied","Data":"8874248b35b3044ef165fdc3593e8e2e389f8fc195b0aace5c1260fbf7aea6e6"} Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.513170 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf","Type":"ContainerDied","Data":"399eb4cfb0a9dbf7669b0c4a88685369d200d61fd03a67f2e15006ad5b098abb"} Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.513246 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.521795 5014 scope.go:117] "RemoveContainer" containerID="217fe11fe290a286d9693b3ce94387c3c5d799112f056294ef17d3e38a043e2a" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.523559 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.526082 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_dab38488-ecfd-45ec-bb75-5e3d5bdd42e7/ovn-northd/0.log" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.526127 5014 generic.go:334] "Generic (PLEG): container finished" podID="dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" containerID="a5aaa8e6da3bb1475013fcf9505c94fb4f157784a202b67b1fafdd6706d374db" exitCode=139 Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.526198 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7","Type":"ContainerDied","Data":"a5aaa8e6da3bb1475013fcf9505c94fb4f157784a202b67b1fafdd6706d374db"} Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.542250 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-697f765b44-77s6g" event={"ID":"13fe806f-0e4e-4ea7-838b-938c5fe74c99","Type":"ContainerDied","Data":"ee6633ae7d4c2991890663755df7dea48bc2b11a8fff2a845c44928c91a27090"} Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.542365 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-697f765b44-77s6g" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.550034 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.556484 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.564070 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-849cf44bc5-9qnb4" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.564216 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-849cf44bc5-9qnb4" event={"ID":"56ddac83-2b9f-4a7d-a9c9-02de44e0ab00","Type":"ContainerDied","Data":"fc2ee08b550a6d641e24574700b1a69c677041fd959ed673c56e5c5a970cb8f4"} Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.571249 5014 scope.go:117] "RemoveContainer" containerID="a4c0aa6211dfec0a5eda7b347326298f80376112ca717764da53c00f048df69b" Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.579751 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4c0aa6211dfec0a5eda7b347326298f80376112ca717764da53c00f048df69b\": container with ID starting with a4c0aa6211dfec0a5eda7b347326298f80376112ca717764da53c00f048df69b not found: ID does not exist" containerID="a4c0aa6211dfec0a5eda7b347326298f80376112ca717764da53c00f048df69b" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.579794 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4c0aa6211dfec0a5eda7b347326298f80376112ca717764da53c00f048df69b"} err="failed to get container status \"a4c0aa6211dfec0a5eda7b347326298f80376112ca717764da53c00f048df69b\": rpc error: code = NotFound desc = could not find container \"a4c0aa6211dfec0a5eda7b347326298f80376112ca717764da53c00f048df69b\": container with ID starting with a4c0aa6211dfec0a5eda7b347326298f80376112ca717764da53c00f048df69b not found: ID does not exist" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.579820 5014 scope.go:117] "RemoveContainer" containerID="217fe11fe290a286d9693b3ce94387c3c5d799112f056294ef17d3e38a043e2a" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.579995 5014 generic.go:334] "Generic (PLEG): container finished" podID="cefbd6b6-3aa3-459d-9f8d-060736f4de92" containerID="b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb" exitCode=0 Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.580039 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cefbd6b6-3aa3-459d-9f8d-060736f4de92","Type":"ContainerDied","Data":"b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb"} Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.580062 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cefbd6b6-3aa3-459d-9f8d-060736f4de92","Type":"ContainerDied","Data":"358e1821e030ef51d6ba8f0efe6360a432df751a1831462c97c302d5fdb0faab"} Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.580103 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.581164 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"217fe11fe290a286d9693b3ce94387c3c5d799112f056294ef17d3e38a043e2a\": container with ID starting with 217fe11fe290a286d9693b3ce94387c3c5d799112f056294ef17d3e38a043e2a not found: ID does not exist" containerID="217fe11fe290a286d9693b3ce94387c3c5d799112f056294ef17d3e38a043e2a" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.581210 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"217fe11fe290a286d9693b3ce94387c3c5d799112f056294ef17d3e38a043e2a"} err="failed to get container status \"217fe11fe290a286d9693b3ce94387c3c5d799112f056294ef17d3e38a043e2a\": rpc error: code = NotFound desc = could not find container \"217fe11fe290a286d9693b3ce94387c3c5d799112f056294ef17d3e38a043e2a\": container with ID starting with 217fe11fe290a286d9693b3ce94387c3c5d799112f056294ef17d3e38a043e2a not found: ID does not exist" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.581240 5014 scope.go:117] "RemoveContainer" containerID="496962eeeb2ad1c6b5517af39859967ca3ba2462e7203bf88e13bcb22e74bca9" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.584371 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.588154 5014 generic.go:334] "Generic (PLEG): container finished" podID="c8f59d7d-f71b-46b0-bd32-476a2517a3b6" containerID="eb4d1ac3e92d3cfcfc09e4936b90190475ab22e4925f8ce4f363a59470abfbe5" exitCode=0 Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.588248 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.588835 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c8f59d7d-f71b-46b0-bd32-476a2517a3b6","Type":"ContainerDied","Data":"eb4d1ac3e92d3cfcfc09e4936b90190475ab22e4925f8ce4f363a59470abfbe5"} Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.597060 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.603209 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-697f765b44-77s6g"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.632781 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-697f765b44-77s6g"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.637593 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-fernet-keys\") pod \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.637676 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-credential-keys\") pod \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.637705 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-config-data\") pod \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.637756 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-combined-ca-bundle\") pod \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.637790 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-scripts\") pod \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.637870 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-internal-tls-certs\") pod \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.637906 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fc2ts\" (UniqueName: \"kubernetes.io/projected/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-kube-api-access-fc2ts\") pod \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.637946 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-public-tls-certs\") pod \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\" (UID: \"fccac7f9-eeaa-4481-ab49-9e71dd8af79c\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.651483 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-scripts" (OuterVolumeSpecName: "scripts") pod "fccac7f9-eeaa-4481-ab49-9e71dd8af79c" (UID: "fccac7f9-eeaa-4481-ab49-9e71dd8af79c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.654964 5014 scope.go:117] "RemoveContainer" containerID="d836f8e8bcb39601fc27693a322159da55ec8df8c24bea7d87e72544f5c6ca25" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.668018 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "fccac7f9-eeaa-4481-ab49-9e71dd8af79c" (UID: "fccac7f9-eeaa-4481-ab49-9e71dd8af79c"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.686960 5014 scope.go:117] "RemoveContainer" containerID="cdc30334797cf0d0a1abb5ab3b3e87dbac8996fc837919c9242d311431f403b2" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.686303 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "fccac7f9-eeaa-4481-ab49-9e71dd8af79c" (UID: "fccac7f9-eeaa-4481-ab49-9e71dd8af79c"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.712099 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-849cf44bc5-9qnb4"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.713137 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-kube-api-access-fc2ts" (OuterVolumeSpecName: "kube-api-access-fc2ts") pod "fccac7f9-eeaa-4481-ab49-9e71dd8af79c" (UID: "fccac7f9-eeaa-4481-ab49-9e71dd8af79c"). InnerVolumeSpecName "kube-api-access-fc2ts". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.723753 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fccac7f9-eeaa-4481-ab49-9e71dd8af79c" (UID: "fccac7f9-eeaa-4481-ab49-9e71dd8af79c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.724549 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-849cf44bc5-9qnb4"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.724739 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "fccac7f9-eeaa-4481-ab49-9e71dd8af79c" (UID: "fccac7f9-eeaa-4481-ab49-9e71dd8af79c"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.741789 5014 scope.go:117] "RemoveContainer" containerID="cdc30334797cf0d0a1abb5ab3b3e87dbac8996fc837919c9242d311431f403b2" Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.742965 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdc30334797cf0d0a1abb5ab3b3e87dbac8996fc837919c9242d311431f403b2\": container with ID starting with cdc30334797cf0d0a1abb5ab3b3e87dbac8996fc837919c9242d311431f403b2 not found: ID does not exist" containerID="cdc30334797cf0d0a1abb5ab3b3e87dbac8996fc837919c9242d311431f403b2" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.743001 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdc30334797cf0d0a1abb5ab3b3e87dbac8996fc837919c9242d311431f403b2"} err="failed to get container status \"cdc30334797cf0d0a1abb5ab3b3e87dbac8996fc837919c9242d311431f403b2\": rpc error: code = NotFound desc = could not find container \"cdc30334797cf0d0a1abb5ab3b3e87dbac8996fc837919c9242d311431f403b2\": container with ID starting with cdc30334797cf0d0a1abb5ab3b3e87dbac8996fc837919c9242d311431f403b2 not found: ID does not exist" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.743022 5014 scope.go:117] "RemoveContainer" containerID="f799cb043ff7d806ae4f877f34b2084e03598231d0c40a3275884b945a348c9e" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.743053 5014 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.743084 5014 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-credential-keys\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.745537 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.745559 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.745605 5014 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.745636 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fc2ts\" (UniqueName: \"kubernetes.io/projected/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-kube-api-access-fc2ts\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.746558 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.748846 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-config-data" (OuterVolumeSpecName: "config-data") pod "fccac7f9-eeaa-4481-ab49-9e71dd8af79c" (UID: 
"fccac7f9-eeaa-4481-ab49-9e71dd8af79c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.758686 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "fccac7f9-eeaa-4481-ab49-9e71dd8af79c" (UID: "fccac7f9-eeaa-4481-ab49-9e71dd8af79c"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.763497 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.770649 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.777699 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.779868 5014 scope.go:117] "RemoveContainer" containerID="f69566e2159d9025363346b43e15495ad0dab925585a51aae0e3e6b46ac486f8" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.795072 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_dab38488-ecfd-45ec-bb75-5e3d5bdd42e7/ovn-northd/0.log" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.795361 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.796202 5014 scope.go:117] "RemoveContainer" containerID="6d9c705385f5fdafb7775a2b08e388c98d6323f0cc8e8398d03748b74369b48a" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.823720 5014 scope.go:117] "RemoveContainer" containerID="adb9b9301193619d4a64043aa363957a79778e3963acf98e99151a9c5d6dc68d" Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.823808 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.823867 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.824500 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.825019 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.825048 5014 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-fwbdt" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovsdb-server" Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.825388 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.829834 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.829863 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-fwbdt" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovs-vswitchd" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.844783 5014 scope.go:117] "RemoveContainer" containerID="b619016e22f6883fb4aa46369b1d687e057d2e2ffd28c9ca34b55365fb9839c5" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.846150 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-ovn-northd-tls-certs\") pod \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.846314 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-scripts\") pod \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.846346 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-combined-ca-bundle\") pod \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.846378 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-metrics-certs-tls-certs\") pod \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 
21:53:10.846404 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-config\") pod \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.846441 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-ovn-rundir\") pod \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.846556 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzwp7\" (UniqueName: \"kubernetes.io/projected/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-kube-api-access-tzwp7\") pod \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\" (UID: \"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.847119 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.847142 5014 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fccac7f9-eeaa-4481-ab49-9e71dd8af79c-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.848347 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-scripts" (OuterVolumeSpecName: "scripts") pod "dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" (UID: "dab38488-ecfd-45ec-bb75-5e3d5bdd42e7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.848857 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" (UID: "dab38488-ecfd-45ec-bb75-5e3d5bdd42e7"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.848926 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-config" (OuterVolumeSpecName: "config") pod "dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" (UID: "dab38488-ecfd-45ec-bb75-5e3d5bdd42e7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.850081 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-kube-api-access-tzwp7" (OuterVolumeSpecName: "kube-api-access-tzwp7") pod "dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" (UID: "dab38488-ecfd-45ec-bb75-5e3d5bdd42e7"). InnerVolumeSpecName "kube-api-access-tzwp7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.850485 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.852586 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.856439 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.856507 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="6ebf215b-a88f-4b08-8f2e-58284b7d4548" containerName="nova-cell0-conductor-conductor" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.862229 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.877871 5014 scope.go:117] "RemoveContainer" containerID="1427a4912ecba9f84ea4a41b472f8f218b0c4df4bc3dd48139b02a15ecfe2a67" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.880360 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" (UID: "dab38488-ecfd-45ec-bb75-5e3d5bdd42e7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.922585 5014 scope.go:117] "RemoveContainer" containerID="179498d178eaf12fad9776b3020c6b558f9bd713b5bcdcb7b15c957545542e01" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.939988 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" (UID: "dab38488-ecfd-45ec-bb75-5e3d5bdd42e7"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.945784 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" (UID: "dab38488-ecfd-45ec-bb75-5e3d5bdd42e7"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.946797 5014 scope.go:117] "RemoveContainer" containerID="8f5fff8a69f244b622369e3d3222e772ceb03080fc79e98a2126892aa6220818" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.947511 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-confd\") pod \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.947587 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data\") pod \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.947657 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-plugins\") pod \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.947678 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-erlang-cookie-secret\") pod \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.947711 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.947742 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-pod-info\") pod \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.947756 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-plugins-conf\") pod \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.947778 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-server-conf\") pod \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.947803 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-tls\") pod \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.947846 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qjbz\" (UniqueName: 
\"kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-kube-api-access-2qjbz\") pod \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.947862 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-erlang-cookie\") pod \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\" (UID: \"c8f59d7d-f71b-46b0-bd32-476a2517a3b6\") " Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.948149 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.948178 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.948188 5014 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.948196 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-config\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.948204 5014 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-ovn-rundir\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.948212 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzwp7\" (UniqueName: \"kubernetes.io/projected/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-kube-api-access-tzwp7\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.948221 5014 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.948675 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "c8f59d7d-f71b-46b0-bd32-476a2517a3b6" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.948821 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "c8f59d7d-f71b-46b0-bd32-476a2517a3b6" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.949174 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "c8f59d7d-f71b-46b0-bd32-476a2517a3b6" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.951159 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "c8f59d7d-f71b-46b0-bd32-476a2517a3b6" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.954200 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "persistence") pod "c8f59d7d-f71b-46b0-bd32-476a2517a3b6" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.954980 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "c8f59d7d-f71b-46b0-bd32-476a2517a3b6" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.955018 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-kube-api-access-2qjbz" (OuterVolumeSpecName: "kube-api-access-2qjbz") pod "c8f59d7d-f71b-46b0-bd32-476a2517a3b6" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6"). InnerVolumeSpecName "kube-api-access-2qjbz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.960920 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-pod-info" (OuterVolumeSpecName: "pod-info") pod "c8f59d7d-f71b-46b0-bd32-476a2517a3b6" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.977346 5014 scope.go:117] "RemoveContainer" containerID="179498d178eaf12fad9776b3020c6b558f9bd713b5bcdcb7b15c957545542e01" Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.977898 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"179498d178eaf12fad9776b3020c6b558f9bd713b5bcdcb7b15c957545542e01\": container with ID starting with 179498d178eaf12fad9776b3020c6b558f9bd713b5bcdcb7b15c957545542e01 not found: ID does not exist" containerID="179498d178eaf12fad9776b3020c6b558f9bd713b5bcdcb7b15c957545542e01" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.977941 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"179498d178eaf12fad9776b3020c6b558f9bd713b5bcdcb7b15c957545542e01"} err="failed to get container status \"179498d178eaf12fad9776b3020c6b558f9bd713b5bcdcb7b15c957545542e01\": rpc error: code = NotFound desc = could not find container \"179498d178eaf12fad9776b3020c6b558f9bd713b5bcdcb7b15c957545542e01\": container with ID starting with 179498d178eaf12fad9776b3020c6b558f9bd713b5bcdcb7b15c957545542e01 not found: ID does not exist" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.977967 5014 scope.go:117] "RemoveContainer" containerID="8f5fff8a69f244b622369e3d3222e772ceb03080fc79e98a2126892aa6220818" Oct 06 21:53:10 crc kubenswrapper[5014]: E1006 21:53:10.978362 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f5fff8a69f244b622369e3d3222e772ceb03080fc79e98a2126892aa6220818\": container with ID starting with 8f5fff8a69f244b622369e3d3222e772ceb03080fc79e98a2126892aa6220818 not found: ID does not exist" containerID="8f5fff8a69f244b622369e3d3222e772ceb03080fc79e98a2126892aa6220818" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.978420 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f5fff8a69f244b622369e3d3222e772ceb03080fc79e98a2126892aa6220818"} err="failed to get container status \"8f5fff8a69f244b622369e3d3222e772ceb03080fc79e98a2126892aa6220818\": rpc error: code = NotFound desc = could not find container \"8f5fff8a69f244b622369e3d3222e772ceb03080fc79e98a2126892aa6220818\": container with ID starting with 8f5fff8a69f244b622369e3d3222e772ceb03080fc79e98a2126892aa6220818 not found: ID does not exist" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.978442 5014 scope.go:117] "RemoveContainer" containerID="1b5ca8147b22af5bd68e4073c702853e918a3da61cc14255747ea6cd2310a427" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.983284 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data" (OuterVolumeSpecName: "config-data") pod "c8f59d7d-f71b-46b0-bd32-476a2517a3b6" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:10 crc kubenswrapper[5014]: I1006 21:53:10.999151 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-server-conf" (OuterVolumeSpecName: "server-conf") pod "c8f59d7d-f71b-46b0-bd32-476a2517a3b6" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.011880 5014 scope.go:117] "RemoveContainer" containerID="4563049f441233d9899b9510c09134c6b97cbbfee44d45e1869f367480e6495a" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.028081 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "c8f59d7d-f71b-46b0-bd32-476a2517a3b6" (UID: "c8f59d7d-f71b-46b0-bd32-476a2517a3b6"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.031857 5014 scope.go:117] "RemoveContainer" containerID="1c78227e99eced5448ed246e1bcbef1171c8d55eecc061f23758f282026f4fed" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.049507 5014 scope.go:117] "RemoveContainer" containerID="ec20b9f41983e4b8a0ea0f9349995c36994eedc43877f52f349e3a2298e48326" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.049993 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.050016 5014 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.050029 5014 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.050056 5014 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.050069 5014 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-pod-info\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.050079 5014 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-plugins-conf\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.050090 5014 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-server-conf\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.050101 5014 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.050111 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qjbz\" (UniqueName: \"kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-kube-api-access-2qjbz\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.050122 5014 reconciler_common.go:293] "Volume detached for volume 
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.050138 5014 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c8f59d7d-f71b-46b0-bd32-476a2517a3b6-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.068956 5014 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.077862 5014 scope.go:117] "RemoveContainer" containerID="d23dd50b826bbf964b1ff4d5d05a5097816f29157fbba7ced1c970a4eec16c3b" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.101938 5014 scope.go:117] "RemoveContainer" containerID="1e488f802cc8da011f91e05b3c478b47334580c1a8c26408bcadc4e55ec4cd81" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.122681 5014 scope.go:117] "RemoveContainer" containerID="8e43bbec207ad90407cab0459eeaa37da10b5228534b3bcc64a9d48cab00a4fb" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.147556 5014 scope.go:117] "RemoveContainer" containerID="8874248b35b3044ef165fdc3593e8e2e389f8fc195b0aace5c1260fbf7aea6e6" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.151490 5014 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.178443 5014 scope.go:117] "RemoveContainer" containerID="8e43bbec207ad90407cab0459eeaa37da10b5228534b3bcc64a9d48cab00a4fb" Oct 06 21:53:11 crc kubenswrapper[5014]: E1006 21:53:11.178972 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e43bbec207ad90407cab0459eeaa37da10b5228534b3bcc64a9d48cab00a4fb\": container with ID starting with 8e43bbec207ad90407cab0459eeaa37da10b5228534b3bcc64a9d48cab00a4fb not found: ID does not exist" containerID="8e43bbec207ad90407cab0459eeaa37da10b5228534b3bcc64a9d48cab00a4fb" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.179012 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e43bbec207ad90407cab0459eeaa37da10b5228534b3bcc64a9d48cab00a4fb"} err="failed to get container status \"8e43bbec207ad90407cab0459eeaa37da10b5228534b3bcc64a9d48cab00a4fb\": rpc error: code = NotFound desc = could not find container \"8e43bbec207ad90407cab0459eeaa37da10b5228534b3bcc64a9d48cab00a4fb\": container with ID starting with 8e43bbec207ad90407cab0459eeaa37da10b5228534b3bcc64a9d48cab00a4fb not found: ID does not exist" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.179054 5014 scope.go:117] "RemoveContainer" containerID="8874248b35b3044ef165fdc3593e8e2e389f8fc195b0aace5c1260fbf7aea6e6" Oct 06 21:53:11 crc kubenswrapper[5014]: E1006 21:53:11.179474 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8874248b35b3044ef165fdc3593e8e2e389f8fc195b0aace5c1260fbf7aea6e6\": container with ID starting with 8874248b35b3044ef165fdc3593e8e2e389f8fc195b0aace5c1260fbf7aea6e6 not found: ID does not exist" containerID="8874248b35b3044ef165fdc3593e8e2e389f8fc195b0aace5c1260fbf7aea6e6" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 
21:53:11.179514 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8874248b35b3044ef165fdc3593e8e2e389f8fc195b0aace5c1260fbf7aea6e6"} err="failed to get container status \"8874248b35b3044ef165fdc3593e8e2e389f8fc195b0aace5c1260fbf7aea6e6\": rpc error: code = NotFound desc = could not find container \"8874248b35b3044ef165fdc3593e8e2e389f8fc195b0aace5c1260fbf7aea6e6\": container with ID starting with 8874248b35b3044ef165fdc3593e8e2e389f8fc195b0aace5c1260fbf7aea6e6 not found: ID does not exist" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.179543 5014 scope.go:117] "RemoveContainer" containerID="dec3beff6abbdcf32f4d602873fe4ab229755aef532b1c3308ed69b08438e50e" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.261440 5014 scope.go:117] "RemoveContainer" containerID="7c61f00177c33532524b8163faef3133d7c88a0c6edeaf8d14eb4a6022f7abdd" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.276504 5014 scope.go:117] "RemoveContainer" containerID="6e30f54354b4e05d72cd38208e8b8588c2363ab84617b170457752ebb404a286" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.297736 5014 scope.go:117] "RemoveContainer" containerID="cc9f5a4202ce6f9f16ab8d8453f50c25c26d08e6f673de106491b3f0b64a0984" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.317554 5014 scope.go:117] "RemoveContainer" containerID="b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.340900 5014 scope.go:117] "RemoveContainer" containerID="b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb" Oct 06 21:53:11 crc kubenswrapper[5014]: E1006 21:53:11.341294 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb\": container with ID starting with b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb not found: ID does not exist" containerID="b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.341333 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb"} err="failed to get container status \"b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb\": rpc error: code = NotFound desc = could not find container \"b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb\": container with ID starting with b521f92d690d7f4582f8adf8d2826fa33c967ac340f2621ad4c5efb64bf29cfb not found: ID does not exist" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.500430 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" path="/var/lib/kubelet/pods/02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf/volumes" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.501262 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0bd07ab2-973f-4531-8e5f-68d349e231b4" path="/var/lib/kubelet/pods/0bd07ab2-973f-4531-8e5f-68d349e231b4/volumes" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.502069 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13fe806f-0e4e-4ea7-838b-938c5fe74c99" path="/var/lib/kubelet/pods/13fe806f-0e4e-4ea7-838b-938c5fe74c99/volumes" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.503424 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes 
dir" podUID="1dfeacf8-a072-4b44-bed9-618acd31fb6f" path="/var/lib/kubelet/pods/1dfeacf8-a072-4b44-bed9-618acd31fb6f/volumes" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.504384 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" path="/var/lib/kubelet/pods/2439cd4a-a6a0-4b85-b4d5-3952b61af5bd/volumes" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.505832 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" path="/var/lib/kubelet/pods/32e89ff3-0d60-4fd1-9c0d-831b92311165/volumes" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.506779 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c1e8f70-227b-40e0-aceb-470eed382180" path="/var/lib/kubelet/pods/3c1e8f70-227b-40e0-aceb-470eed382180/volumes" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.507592 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a3bf50b-cb91-4201-affd-0c42d3585df2" path="/var/lib/kubelet/pods/4a3bf50b-cb91-4201-affd-0c42d3585df2/volumes" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.509132 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" path="/var/lib/kubelet/pods/56ddac83-2b9f-4a7d-a9c9-02de44e0ab00/volumes" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.509891 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd16c866-91b0-4261-a084-7a96ac597c04" path="/var/lib/kubelet/pods/cd16c866-91b0-4261-a084-7a96ac597c04/volumes" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.511095 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cefbd6b6-3aa3-459d-9f8d-060736f4de92" path="/var/lib/kubelet/pods/cefbd6b6-3aa3-459d-9f8d-060736f4de92/volumes" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.512196 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="daa5c33b-d941-4030-bf9f-cd6ed831986e" path="/var/lib/kubelet/pods/daa5c33b-d941-4030-bf9f-cd6ed831986e/volumes" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.634171 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c8f59d7d-f71b-46b0-bd32-476a2517a3b6","Type":"ContainerDied","Data":"6f1e338aff0051925912772b4d9266eeb391a4d547b790e5d76d0bc406be12bb"} Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.634222 5014 scope.go:117] "RemoveContainer" containerID="eb4d1ac3e92d3cfcfc09e4936b90190475ab22e4925f8ce4f363a59470abfbe5" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.634885 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.651232 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_dab38488-ecfd-45ec-bb75-5e3d5bdd42e7/ovn-northd/0.log" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.651307 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"dab38488-ecfd-45ec-bb75-5e3d5bdd42e7","Type":"ContainerDied","Data":"838b092f31e9d67188d165fd454fdb34ab0bb400cb117d86d9871925bed6ba7d"} Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.651397 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.663873 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.669635 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-78bf4bbdb7-6fpl9" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.672466 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.681298 5014 scope.go:117] "RemoveContainer" containerID="09d7c2ebf6323f6087d0027d356d50b1abd8029b6b521b27fd0322faa1293a5d" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.682104 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.689867 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.703144 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-78bf4bbdb7-6fpl9"] Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.710185 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-78bf4bbdb7-6fpl9"] Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.712238 5014 scope.go:117] "RemoveContainer" containerID="d9b24ea6635bde477fa710678733d33fd5f1e17d35ee8b466a9fe4445920964c" Oct 06 21:53:11 crc kubenswrapper[5014]: I1006 21:53:11.751227 5014 scope.go:117] "RemoveContainer" containerID="a5aaa8e6da3bb1475013fcf9505c94fb4f157784a202b67b1fafdd6706d374db" Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.212451 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.267860 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7de90fe7-747a-4334-be2a-d3b5ee6b8148-config-data\") pod \"7de90fe7-747a-4334-be2a-d3b5ee6b8148\" (UID: \"7de90fe7-747a-4334-be2a-d3b5ee6b8148\") " Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.267982 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6bmz\" (UniqueName: \"kubernetes.io/projected/7de90fe7-747a-4334-be2a-d3b5ee6b8148-kube-api-access-p6bmz\") pod \"7de90fe7-747a-4334-be2a-d3b5ee6b8148\" (UID: \"7de90fe7-747a-4334-be2a-d3b5ee6b8148\") " Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.268120 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7de90fe7-747a-4334-be2a-d3b5ee6b8148-combined-ca-bundle\") pod \"7de90fe7-747a-4334-be2a-d3b5ee6b8148\" (UID: \"7de90fe7-747a-4334-be2a-d3b5ee6b8148\") " Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.273089 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7de90fe7-747a-4334-be2a-d3b5ee6b8148-kube-api-access-p6bmz" (OuterVolumeSpecName: "kube-api-access-p6bmz") pod "7de90fe7-747a-4334-be2a-d3b5ee6b8148" (UID: "7de90fe7-747a-4334-be2a-d3b5ee6b8148"). InnerVolumeSpecName "kube-api-access-p6bmz". 
PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.290900 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7de90fe7-747a-4334-be2a-d3b5ee6b8148-config-data" (OuterVolumeSpecName: "config-data") pod "7de90fe7-747a-4334-be2a-d3b5ee6b8148" (UID: "7de90fe7-747a-4334-be2a-d3b5ee6b8148"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.311051 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7de90fe7-747a-4334-be2a-d3b5ee6b8148-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7de90fe7-747a-4334-be2a-d3b5ee6b8148" (UID: "7de90fe7-747a-4334-be2a-d3b5ee6b8148"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.352527 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.370206 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6bmz\" (UniqueName: \"kubernetes.io/projected/7de90fe7-747a-4334-be2a-d3b5ee6b8148-kube-api-access-p6bmz\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.370230 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7de90fe7-747a-4334-be2a-d3b5ee6b8148-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.370239 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7de90fe7-747a-4334-be2a-d3b5ee6b8148-config-data\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.471600 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4b977fc8-6c11-41e6-9500-f0da2d66aea1-pod-info\") pod \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") "
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.471668 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4b977fc8-6c11-41e6-9500-f0da2d66aea1-erlang-cookie-secret\") pod \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") "
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.471695 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-tls\") pod \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") "
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.471720 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwpgx\" (UniqueName: \"kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-kube-api-access-wwpgx\") pod \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") "
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.471759 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data\") pod \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") "
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.471829 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-erlang-cookie\") pod \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") "
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.471849 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-plugins-conf\") pod \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") "
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.471878 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-confd\") pod \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") "
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.471903 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-server-conf\") pod \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") "
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.471950 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") "
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.471983 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-plugins\") pod \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\" (UID: \"4b977fc8-6c11-41e6-9500-f0da2d66aea1\") "
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.473569 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "4b977fc8-6c11-41e6-9500-f0da2d66aea1" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.474055 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "4b977fc8-6c11-41e6-9500-f0da2d66aea1" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.474534 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "4b977fc8-6c11-41e6-9500-f0da2d66aea1" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.480960 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b977fc8-6c11-41e6-9500-f0da2d66aea1-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "4b977fc8-6c11-41e6-9500-f0da2d66aea1" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.481518 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "4b977fc8-6c11-41e6-9500-f0da2d66aea1" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.482943 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-kube-api-access-wwpgx" (OuterVolumeSpecName: "kube-api-access-wwpgx") pod "4b977fc8-6c11-41e6-9500-f0da2d66aea1" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1"). InnerVolumeSpecName "kube-api-access-wwpgx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.483321 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "persistence") pod "4b977fc8-6c11-41e6-9500-f0da2d66aea1" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.483612 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/4b977fc8-6c11-41e6-9500-f0da2d66aea1-pod-info" (OuterVolumeSpecName: "pod-info") pod "4b977fc8-6c11-41e6-9500-f0da2d66aea1" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.491148 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data" (OuterVolumeSpecName: "config-data") pod "4b977fc8-6c11-41e6-9500-f0da2d66aea1" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.514707 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-server-conf" (OuterVolumeSpecName: "server-conf") pod "4b977fc8-6c11-41e6-9500-f0da2d66aea1" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.566148 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "4b977fc8-6c11-41e6-9500-f0da2d66aea1" (UID: "4b977fc8-6c11-41e6-9500-f0da2d66aea1"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.574281 5014 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4b977fc8-6c11-41e6-9500-f0da2d66aea1-pod-info\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.574321 5014 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4b977fc8-6c11-41e6-9500-f0da2d66aea1-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.574335 5014 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.574348 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwpgx\" (UniqueName: \"kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-kube-api-access-wwpgx\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.574361 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-config-data\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.574373 5014 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.574384 5014 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-plugins-conf\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.574395 5014 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.574407 5014 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4b977fc8-6c11-41e6-9500-f0da2d66aea1-server-conf\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.574434 5014 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" "
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.574447 5014 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4b977fc8-6c11-41e6-9500-f0da2d66aea1-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.593318 5014 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc"
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.676147 5014 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.682013 5014 generic.go:334] "Generic (PLEG): container finished" podID="4b977fc8-6c11-41e6-9500-f0da2d66aea1" containerID="5d8ae003e8dc923a3ad6afd87e268712900c1d33e130493581a8045c5495deed" exitCode=0
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.682090 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4b977fc8-6c11-41e6-9500-f0da2d66aea1","Type":"ContainerDied","Data":"5d8ae003e8dc923a3ad6afd87e268712900c1d33e130493581a8045c5495deed"}
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.682109 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.682136 5014 scope.go:117] "RemoveContainer" containerID="5d8ae003e8dc923a3ad6afd87e268712900c1d33e130493581a8045c5495deed"
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.682121 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4b977fc8-6c11-41e6-9500-f0da2d66aea1","Type":"ContainerDied","Data":"6cafdd1c4b027c8666bbc2f9206f42e474fdfb5f4dc256a546e92a716c66b2c9"}
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.688392 5014 generic.go:334] "Generic (PLEG): container finished" podID="7de90fe7-747a-4334-be2a-d3b5ee6b8148" containerID="3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370" exitCode=0
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.688575 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"7de90fe7-747a-4334-be2a-d3b5ee6b8148","Type":"ContainerDied","Data":"3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370"}
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.688741 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"7de90fe7-747a-4334-be2a-d3b5ee6b8148","Type":"ContainerDied","Data":"5a7f15bf6b0fcbdd2b4dfd00fd05e8dfa2b0f035fdab7de942b9739ae9111317"}
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.688918 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.717649 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.722027 5014 scope.go:117] "RemoveContainer" containerID="166b1c2d8ef910ca7a9787911a267a186c4cd418c36eafd7d791b96769ac9953"
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.731030 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.749308 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.756038 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.756997 5014 scope.go:117] "RemoveContainer" containerID="5d8ae003e8dc923a3ad6afd87e268712900c1d33e130493581a8045c5495deed"
Oct 06 21:53:12 crc kubenswrapper[5014]: E1006 21:53:12.757414 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d8ae003e8dc923a3ad6afd87e268712900c1d33e130493581a8045c5495deed\": container with ID starting with 5d8ae003e8dc923a3ad6afd87e268712900c1d33e130493581a8045c5495deed not found: ID does not exist" containerID="5d8ae003e8dc923a3ad6afd87e268712900c1d33e130493581a8045c5495deed"
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.757452 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d8ae003e8dc923a3ad6afd87e268712900c1d33e130493581a8045c5495deed"} err="failed to get container status \"5d8ae003e8dc923a3ad6afd87e268712900c1d33e130493581a8045c5495deed\": rpc error: code = NotFound desc = could not find container \"5d8ae003e8dc923a3ad6afd87e268712900c1d33e130493581a8045c5495deed\": container with ID starting with 5d8ae003e8dc923a3ad6afd87e268712900c1d33e130493581a8045c5495deed not found: ID does not exist"
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.757476 5014 scope.go:117] "RemoveContainer" containerID="166b1c2d8ef910ca7a9787911a267a186c4cd418c36eafd7d791b96769ac9953"
Oct 06 21:53:12 crc kubenswrapper[5014]: E1006 21:53:12.757869 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"166b1c2d8ef910ca7a9787911a267a186c4cd418c36eafd7d791b96769ac9953\": container with ID starting with 166b1c2d8ef910ca7a9787911a267a186c4cd418c36eafd7d791b96769ac9953 not found: ID does not exist" containerID="166b1c2d8ef910ca7a9787911a267a186c4cd418c36eafd7d791b96769ac9953"
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.757901 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"166b1c2d8ef910ca7a9787911a267a186c4cd418c36eafd7d791b96769ac9953"} err="failed to get container status \"166b1c2d8ef910ca7a9787911a267a186c4cd418c36eafd7d791b96769ac9953\": rpc error: code = NotFound desc = could not find container \"166b1c2d8ef910ca7a9787911a267a186c4cd418c36eafd7d791b96769ac9953\": container with ID starting with 166b1c2d8ef910ca7a9787911a267a186c4cd418c36eafd7d791b96769ac9953 not found: ID does not exist"
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.757919 5014 scope.go:117] "RemoveContainer" containerID="3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370"
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.777537 5014 scope.go:117] "RemoveContainer" containerID="3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370"
Oct 06 21:53:12 crc kubenswrapper[5014]: E1006 21:53:12.778028 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370\": container with ID starting with 3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370 not found: ID does not exist" containerID="3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370"
Oct 06 21:53:12 crc kubenswrapper[5014]: I1006 21:53:12.778082 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370"} err="failed to get container status \"3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370\": rpc error: code = NotFound desc = could not find container \"3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370\": container with ID starting with 3f5abc18338b8768f9037c7e333ad21fcd316d1687dcfa47ad6b2752e1797370 not found: ID does not exist"
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.496537 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b977fc8-6c11-41e6-9500-f0da2d66aea1" path="/var/lib/kubelet/pods/4b977fc8-6c11-41e6-9500-f0da2d66aea1/volumes"
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.497325 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7de90fe7-747a-4334-be2a-d3b5ee6b8148" path="/var/lib/kubelet/pods/7de90fe7-747a-4334-be2a-d3b5ee6b8148/volumes"
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.497939 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8f59d7d-f71b-46b0-bd32-476a2517a3b6" path="/var/lib/kubelet/pods/c8f59d7d-f71b-46b0-bd32-476a2517a3b6/volumes"
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.498955 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" path="/var/lib/kubelet/pods/dab38488-ecfd-45ec-bb75-5e3d5bdd42e7/volumes"
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.499463 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fccac7f9-eeaa-4481-ab49-9e71dd8af79c" path="/var/lib/kubelet/pods/fccac7f9-eeaa-4481-ab49-9e71dd8af79c/volumes"
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.650830 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.688657 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ebf215b-a88f-4b08-8f2e-58284b7d4548-config-data\") pod \"6ebf215b-a88f-4b08-8f2e-58284b7d4548\" (UID: \"6ebf215b-a88f-4b08-8f2e-58284b7d4548\") "
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.688747 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ebf215b-a88f-4b08-8f2e-58284b7d4548-combined-ca-bundle\") pod \"6ebf215b-a88f-4b08-8f2e-58284b7d4548\" (UID: \"6ebf215b-a88f-4b08-8f2e-58284b7d4548\") "
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.688848 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvbzs\" (UniqueName: \"kubernetes.io/projected/6ebf215b-a88f-4b08-8f2e-58284b7d4548-kube-api-access-cvbzs\") pod \"6ebf215b-a88f-4b08-8f2e-58284b7d4548\" (UID: \"6ebf215b-a88f-4b08-8f2e-58284b7d4548\") "
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.695083 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ebf215b-a88f-4b08-8f2e-58284b7d4548-kube-api-access-cvbzs" (OuterVolumeSpecName: "kube-api-access-cvbzs") pod "6ebf215b-a88f-4b08-8f2e-58284b7d4548" (UID: "6ebf215b-a88f-4b08-8f2e-58284b7d4548"). InnerVolumeSpecName "kube-api-access-cvbzs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.702573 5014 generic.go:334] "Generic (PLEG): container finished" podID="6ebf215b-a88f-4b08-8f2e-58284b7d4548" containerID="e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee" exitCode=0
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.702679 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"6ebf215b-a88f-4b08-8f2e-58284b7d4548","Type":"ContainerDied","Data":"e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee"}
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.702711 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"6ebf215b-a88f-4b08-8f2e-58284b7d4548","Type":"ContainerDied","Data":"9dcf19a5822115a7b4fcf3b6501c38303e0234f8c503694ae4a102f7764efe47"}
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.702733 5014 scope.go:117] "RemoveContainer" containerID="e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee"
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.702906 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.717310 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ebf215b-a88f-4b08-8f2e-58284b7d4548-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6ebf215b-a88f-4b08-8f2e-58284b7d4548" (UID: "6ebf215b-a88f-4b08-8f2e-58284b7d4548"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.730865 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ebf215b-a88f-4b08-8f2e-58284b7d4548-config-data" (OuterVolumeSpecName: "config-data") pod "6ebf215b-a88f-4b08-8f2e-58284b7d4548" (UID: "6ebf215b-a88f-4b08-8f2e-58284b7d4548"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.759280 5014 scope.go:117] "RemoveContainer" containerID="e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee"
Oct 06 21:53:13 crc kubenswrapper[5014]: E1006 21:53:13.759781 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee\": container with ID starting with e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee not found: ID does not exist" containerID="e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee"
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.759811 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee"} err="failed to get container status \"e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee\": rpc error: code = NotFound desc = could not find container \"e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee\": container with ID starting with e134d265797dc7e1d0c12be8cd7685d7bd4a43e37de32b1e2a3e9b0772e3e5ee not found: ID does not exist"
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.790452 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvbzs\" (UniqueName: \"kubernetes.io/projected/6ebf215b-a88f-4b08-8f2e-58284b7d4548-kube-api-access-cvbzs\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.790484 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ebf215b-a88f-4b08-8f2e-58284b7d4548-config-data\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:13 crc kubenswrapper[5014]: I1006 21:53:13.790496 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ebf215b-a88f-4b08-8f2e-58284b7d4548-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:14 crc kubenswrapper[5014]: I1006 21:53:14.029637 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Oct 06 21:53:14 crc kubenswrapper[5014]: I1006 21:53:14.035729 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Oct 06 21:53:15 crc kubenswrapper[5014]: I1006 21:53:15.499488 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ebf215b-a88f-4b08-8f2e-58284b7d4548" path="/var/lib/kubelet/pods/6ebf215b-a88f-4b08-8f2e-58284b7d4548/volumes"
Oct 06 21:53:15 crc kubenswrapper[5014]: E1006 21:53:15.828506 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Oct 06 21:53:15 crc kubenswrapper[5014]: E1006 21:53:15.828545 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Oct 06 21:53:15 crc kubenswrapper[5014]: E1006 21:53:15.829922 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Oct 06 21:53:15 crc kubenswrapper[5014]: E1006 21:53:15.830473 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Oct 06 21:53:15 crc kubenswrapper[5014]: E1006 21:53:15.830534 5014 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-fwbdt" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovsdb-server"
Oct 06 21:53:15 crc kubenswrapper[5014]: E1006 21:53:15.831433 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Oct 06 21:53:15 crc kubenswrapper[5014]: E1006 21:53:15.837881 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Oct 06 21:53:15 crc kubenswrapper[5014]: E1006 21:53:15.838027 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-fwbdt" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovs-vswitchd"
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.362888 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-f6fff5c8f-xbgr9"
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.534278 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-ovndb-tls-certs\") pod \"7eeb278b-517f-4b26-825e-12d7d0d969ce\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") "
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.534762 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-internal-tls-certs\") pod \"7eeb278b-517f-4b26-825e-12d7d0d969ce\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") "
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.534830 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-config\") pod \"7eeb278b-517f-4b26-825e-12d7d0d969ce\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") "
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.534857 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-combined-ca-bundle\") pod \"7eeb278b-517f-4b26-825e-12d7d0d969ce\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") "
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.534898 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-httpd-config\") pod \"7eeb278b-517f-4b26-825e-12d7d0d969ce\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") "
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.534951 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzl4l\" (UniqueName: \"kubernetes.io/projected/7eeb278b-517f-4b26-825e-12d7d0d969ce-kube-api-access-nzl4l\") pod \"7eeb278b-517f-4b26-825e-12d7d0d969ce\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") "
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.534990 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-public-tls-certs\") pod \"7eeb278b-517f-4b26-825e-12d7d0d969ce\" (UID: \"7eeb278b-517f-4b26-825e-12d7d0d969ce\") "
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.555774 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7eeb278b-517f-4b26-825e-12d7d0d969ce-kube-api-access-nzl4l" (OuterVolumeSpecName: "kube-api-access-nzl4l") pod "7eeb278b-517f-4b26-825e-12d7d0d969ce" (UID: "7eeb278b-517f-4b26-825e-12d7d0d969ce"). InnerVolumeSpecName "kube-api-access-nzl4l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.558419 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "7eeb278b-517f-4b26-825e-12d7d0d969ce" (UID: "7eeb278b-517f-4b26-825e-12d7d0d969ce"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.574254 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7eeb278b-517f-4b26-825e-12d7d0d969ce" (UID: "7eeb278b-517f-4b26-825e-12d7d0d969ce"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.599612 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "7eeb278b-517f-4b26-825e-12d7d0d969ce" (UID: "7eeb278b-517f-4b26-825e-12d7d0d969ce"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.620332 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7eeb278b-517f-4b26-825e-12d7d0d969ce" (UID: "7eeb278b-517f-4b26-825e-12d7d0d969ce"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.621475 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-config" (OuterVolumeSpecName: "config") pod "7eeb278b-517f-4b26-825e-12d7d0d969ce" (UID: "7eeb278b-517f-4b26-825e-12d7d0d969ce"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.622601 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "7eeb278b-517f-4b26-825e-12d7d0d969ce" (UID: "7eeb278b-517f-4b26-825e-12d7d0d969ce"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.636455 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-config\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.636480 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.636490 5014 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-httpd-config\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.636499 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzl4l\" (UniqueName: \"kubernetes.io/projected/7eeb278b-517f-4b26-825e-12d7d0d969ce-kube-api-access-nzl4l\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.636507 5014 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-public-tls-certs\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.636515 5014 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-ovndb-tls-certs\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.636522 5014 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eeb278b-517f-4b26-825e-12d7d0d969ce-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.740050 5014 generic.go:334] "Generic (PLEG): container finished" podID="7eeb278b-517f-4b26-825e-12d7d0d969ce" containerID="7313b7e6181c156a532c5a8a8da1000e022b6207ee08165f639a58cf73fc9b6a" exitCode=0
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.740109 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f6fff5c8f-xbgr9" event={"ID":"7eeb278b-517f-4b26-825e-12d7d0d969ce","Type":"ContainerDied","Data":"7313b7e6181c156a532c5a8a8da1000e022b6207ee08165f639a58cf73fc9b6a"}
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.740141 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f6fff5c8f-xbgr9" event={"ID":"7eeb278b-517f-4b26-825e-12d7d0d969ce","Type":"ContainerDied","Data":"d4cb4b18ad9fe5bb0744cb2b18016a4335ead96a41d98c32ec1dacef032177fd"}
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.740165 5014 scope.go:117] "RemoveContainer" containerID="48a0f810064b04d6c7a9053d863e731b5f0654d50ec97630c69d7ccb6c16e34f"
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.740272 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-f6fff5c8f-xbgr9"
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.779217 5014 scope.go:117] "RemoveContainer" containerID="7313b7e6181c156a532c5a8a8da1000e022b6207ee08165f639a58cf73fc9b6a"
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.783462 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-f6fff5c8f-xbgr9"]
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.790981 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-f6fff5c8f-xbgr9"]
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.819178 5014 scope.go:117] "RemoveContainer" containerID="48a0f810064b04d6c7a9053d863e731b5f0654d50ec97630c69d7ccb6c16e34f"
Oct 06 21:53:16 crc kubenswrapper[5014]: E1006 21:53:16.819753 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48a0f810064b04d6c7a9053d863e731b5f0654d50ec97630c69d7ccb6c16e34f\": container with ID starting with 48a0f810064b04d6c7a9053d863e731b5f0654d50ec97630c69d7ccb6c16e34f not found: ID does not exist" containerID="48a0f810064b04d6c7a9053d863e731b5f0654d50ec97630c69d7ccb6c16e34f"
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.819802 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48a0f810064b04d6c7a9053d863e731b5f0654d50ec97630c69d7ccb6c16e34f"} err="failed to get container status \"48a0f810064b04d6c7a9053d863e731b5f0654d50ec97630c69d7ccb6c16e34f\": rpc error: code = NotFound desc = could not find container \"48a0f810064b04d6c7a9053d863e731b5f0654d50ec97630c69d7ccb6c16e34f\": container with ID starting with 48a0f810064b04d6c7a9053d863e731b5f0654d50ec97630c69d7ccb6c16e34f not found: ID does not exist"
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.819836 5014 scope.go:117] "RemoveContainer" containerID="7313b7e6181c156a532c5a8a8da1000e022b6207ee08165f639a58cf73fc9b6a"
Oct 06 21:53:16 crc kubenswrapper[5014]: E1006 21:53:16.820500 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7313b7e6181c156a532c5a8a8da1000e022b6207ee08165f639a58cf73fc9b6a\": container with ID starting with 7313b7e6181c156a532c5a8a8da1000e022b6207ee08165f639a58cf73fc9b6a not found: ID does not exist" containerID="7313b7e6181c156a532c5a8a8da1000e022b6207ee08165f639a58cf73fc9b6a"
Oct 06 21:53:16 crc kubenswrapper[5014]: I1006 21:53:16.820544 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7313b7e6181c156a532c5a8a8da1000e022b6207ee08165f639a58cf73fc9b6a"} err="failed to get container status \"7313b7e6181c156a532c5a8a8da1000e022b6207ee08165f639a58cf73fc9b6a\": rpc error: code = NotFound desc = could not find container \"7313b7e6181c156a532c5a8a8da1000e022b6207ee08165f639a58cf73fc9b6a\": container with ID starting with 7313b7e6181c156a532c5a8a8da1000e022b6207ee08165f639a58cf73fc9b6a not found: ID does not exist"
Oct 06 21:53:17 crc kubenswrapper[5014]: I1006 21:53:17.503858 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7eeb278b-517f-4b26-825e-12d7d0d969ce" path="/var/lib/kubelet/pods/7eeb278b-517f-4b26-825e-12d7d0d969ce/volumes"
Oct 06 21:53:20 crc kubenswrapper[5014]: E1006 21:53:20.818134 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Oct 06 21:53:20 crc kubenswrapper[5014]: E1006 21:53:20.820411 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Oct 06 21:53:20 crc kubenswrapper[5014]: E1006 21:53:20.821406 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Oct 06 21:53:20 crc kubenswrapper[5014]: E1006 21:53:20.821507 5014 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-fwbdt" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovsdb-server"
Oct 06 21:53:20 crc kubenswrapper[5014]: E1006 21:53:20.822438 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Oct 06 21:53:20 crc kubenswrapper[5014]: E1006 21:53:20.824755 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Oct 06 21:53:20 crc kubenswrapper[5014]: E1006 21:53:20.826671 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Oct 06 21:53:20 crc kubenswrapper[5014]: E1006 21:53:20.826741 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-fwbdt" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovs-vswitchd"
Oct 06 21:53:25 crc kubenswrapper[5014]: E1006 21:53:25.819945 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Oct 06 21:53:25 crc kubenswrapper[5014]: E1006 21:53:25.820444 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Oct 06 21:53:25 crc kubenswrapper[5014]: E1006 21:53:25.823253 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Oct 06 21:53:25 crc kubenswrapper[5014]: E1006 21:53:25.823794 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Oct 06 21:53:25 crc kubenswrapper[5014]: E1006 21:53:25.823934 5014 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-fwbdt" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovsdb-server"
Oct 06 21:53:25 crc kubenswrapper[5014]: E1006 21:53:25.824078 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Oct 06 21:53:25 crc kubenswrapper[5014]: E1006 21:53:25.826201 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Oct 06 21:53:25 crc kubenswrapper[5014]: E1006 21:53:25.826287 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-fwbdt" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovs-vswitchd"
Oct 06 21:53:30 crc kubenswrapper[5014]: E1006 21:53:30.818770 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Oct 06 21:53:30 crc kubenswrapper[5014]: E1006 21:53:30.819753 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Oct 06 21:53:30 crc kubenswrapper[5014]: E1006 21:53:30.820419 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Oct 06 21:53:30 crc kubenswrapper[5014]: E1006 21:53:30.820464 5014 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-fwbdt" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovsdb-server"
Oct 06 21:53:30 crc kubenswrapper[5014]: E1006 21:53:30.822417 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Oct 06 21:53:30 crc kubenswrapper[5014]: E1006 21:53:30.824894 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Oct 06 21:53:30 crc kubenswrapper[5014]: E1006 21:53:30.827321 5014 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Oct 06 21:53:30 crc kubenswrapper[5014]: E1006 21:53:30.827683 5014 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-fwbdt" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovs-vswitchd"
Oct 06 21:53:32 crc kubenswrapper[5014]: I1006 21:53:32.929865 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-fwbdt_9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345/ovs-vswitchd/0.log"
Oct 06 21:53:32 crc kubenswrapper[5014]: I1006 21:53:32.931406 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-fwbdt"
Oct 06 21:53:32 crc kubenswrapper[5014]: I1006 21:53:32.940324 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-fwbdt_9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345/ovs-vswitchd/0.log"
Oct 06 21:53:32 crc kubenswrapper[5014]: I1006 21:53:32.941261 5014 generic.go:334] "Generic (PLEG): container finished" podID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c" exitCode=137
Oct 06 21:53:32 crc kubenswrapper[5014]: I1006 21:53:32.941296 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-fwbdt" event={"ID":"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345","Type":"ContainerDied","Data":"54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c"}
Oct 06 21:53:32 crc kubenswrapper[5014]: I1006 21:53:32.941326 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-fwbdt" event={"ID":"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345","Type":"ContainerDied","Data":"9c266677ca4f4946e15eb4c16ea59938797fcc27820ff4d9ca1f4c4e6c1b8262"}
Oct 06 21:53:32 crc kubenswrapper[5014]: I1006 21:53:32.941362 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-fwbdt"
Oct 06 21:53:32 crc kubenswrapper[5014]: I1006 21:53:32.941367 5014 scope.go:117] "RemoveContainer" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c"
Oct 06 21:53:32 crc kubenswrapper[5014]: I1006 21:53:32.996118 5014 scope.go:117] "RemoveContainer" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc"
Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.031321 5014 scope.go:117] "RemoveContainer" containerID="f6e5006398e333de1633332d6b3c0f4a9fbd91041ff9cc5f2132f9ac4ddff2c7"
Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.058935 5014 scope.go:117] "RemoveContainer" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c"
Oct 06 21:53:33 crc kubenswrapper[5014]: E1006 21:53:33.059426 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c\": container with ID starting with 54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c not found: ID does not exist" containerID="54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c"
Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.059481 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c"} err="failed to get container status \"54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c\": rpc error: code = NotFound desc = could not find container \"54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c\": container with ID starting with 54bb17b43a951a987777684ddc41ab383ba9b51ebb708407107c926fb85c898c not found: ID does not exist"
Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.059518 5014 scope.go:117] "RemoveContainer" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc"
\"52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc\": container with ID starting with 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc not found: ID does not exist" containerID="52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.060324 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc"} err="failed to get container status \"52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc\": rpc error: code = NotFound desc = could not find container \"52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc\": container with ID starting with 52b75f5592a861465107b0d9f25d61ca8ddad4d4300f442279649f0568f495dc not found: ID does not exist" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.060338 5014 scope.go:117] "RemoveContainer" containerID="f6e5006398e333de1633332d6b3c0f4a9fbd91041ff9cc5f2132f9ac4ddff2c7" Oct 06 21:53:33 crc kubenswrapper[5014]: E1006 21:53:33.060733 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6e5006398e333de1633332d6b3c0f4a9fbd91041ff9cc5f2132f9ac4ddff2c7\": container with ID starting with f6e5006398e333de1633332d6b3c0f4a9fbd91041ff9cc5f2132f9ac4ddff2c7 not found: ID does not exist" containerID="f6e5006398e333de1633332d6b3c0f4a9fbd91041ff9cc5f2132f9ac4ddff2c7" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.060847 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6e5006398e333de1633332d6b3c0f4a9fbd91041ff9cc5f2132f9ac4ddff2c7"} err="failed to get container status \"f6e5006398e333de1633332d6b3c0f4a9fbd91041ff9cc5f2132f9ac4ddff2c7\": rpc error: code = NotFound desc = could not find container \"f6e5006398e333de1633332d6b3c0f4a9fbd91041ff9cc5f2132f9ac4ddff2c7\": container with ID starting with f6e5006398e333de1633332d6b3c0f4a9fbd91041ff9cc5f2132f9ac4ddff2c7 not found: ID does not exist" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.103540 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-etc-ovs\") pod \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.103606 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-lib\") pod \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.103665 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-run\") pod \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.103663 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" (UID: "9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345"). InnerVolumeSpecName "etc-ovs". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.103712 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-log\") pod \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.103776 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvq6s\" (UniqueName: \"kubernetes.io/projected/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-kube-api-access-vvq6s\") pod \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.103803 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-scripts\") pod \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\" (UID: \"9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345\") " Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.103840 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-lib" (OuterVolumeSpecName: "var-lib") pod "9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" (UID: "9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.103802 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-run" (OuterVolumeSpecName: "var-run") pod "9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" (UID: "9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.104106 5014 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-etc-ovs\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.104122 5014 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-lib\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.104135 5014 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-run\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.103834 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-log" (OuterVolumeSpecName: "var-log") pod "9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" (UID: "9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.105464 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-scripts" (OuterVolumeSpecName: "scripts") pod "9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" (UID: "9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.109994 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-kube-api-access-vvq6s" (OuterVolumeSpecName: "kube-api-access-vvq6s") pod "9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" (UID: "9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345"). InnerVolumeSpecName "kube-api-access-vvq6s". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.206012 5014 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-var-log\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.206077 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvq6s\" (UniqueName: \"kubernetes.io/projected/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-kube-api-access-vvq6s\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.206101 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.302956 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-fwbdt"] Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.311408 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-fwbdt"] Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.504594 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" path="/var/lib/kubelet/pods/9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345/volumes" Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.957887 5014 generic.go:334] "Generic (PLEG): container finished" podID="05220712-c8ae-4ac8-9c49-d74770367b33" containerID="794ec9c1ac30a70a53bd70e3b117f5a1d66ee9fd5b291b3742bca6c5ce899fad" exitCode=137 Oct 06 21:53:33 crc kubenswrapper[5014]: I1006 21:53:33.957933 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"794ec9c1ac30a70a53bd70e3b117f5a1d66ee9fd5b291b3742bca6c5ce899fad"} Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.027515 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.118702 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"05220712-c8ae-4ac8-9c49-d74770367b33\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.118759 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/05220712-c8ae-4ac8-9c49-d74770367b33-cache\") pod \"05220712-c8ae-4ac8-9c49-d74770367b33\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.118838 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift\") pod \"05220712-c8ae-4ac8-9c49-d74770367b33\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.118904 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/05220712-c8ae-4ac8-9c49-d74770367b33-lock\") pod \"05220712-c8ae-4ac8-9c49-d74770367b33\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.118958 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzn7k\" (UniqueName: \"kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-kube-api-access-lzn7k\") pod \"05220712-c8ae-4ac8-9c49-d74770367b33\" (UID: \"05220712-c8ae-4ac8-9c49-d74770367b33\") " Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.119479 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05220712-c8ae-4ac8-9c49-d74770367b33-cache" (OuterVolumeSpecName: "cache") pod "05220712-c8ae-4ac8-9c49-d74770367b33" (UID: "05220712-c8ae-4ac8-9c49-d74770367b33"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.119610 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05220712-c8ae-4ac8-9c49-d74770367b33-lock" (OuterVolumeSpecName: "lock") pod "05220712-c8ae-4ac8-9c49-d74770367b33" (UID: "05220712-c8ae-4ac8-9c49-d74770367b33"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.123688 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-kube-api-access-lzn7k" (OuterVolumeSpecName: "kube-api-access-lzn7k") pod "05220712-c8ae-4ac8-9c49-d74770367b33" (UID: "05220712-c8ae-4ac8-9c49-d74770367b33"). InnerVolumeSpecName "kube-api-access-lzn7k". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.123941 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "05220712-c8ae-4ac8-9c49-d74770367b33" (UID: "05220712-c8ae-4ac8-9c49-d74770367b33"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.124229 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "swift") pod "05220712-c8ae-4ac8-9c49-d74770367b33" (UID: "05220712-c8ae-4ac8-9c49-d74770367b33"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.221387 5014 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.221447 5014 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/05220712-c8ae-4ac8-9c49-d74770367b33-cache\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.221470 5014 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-etc-swift\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.221490 5014 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/05220712-c8ae-4ac8-9c49-d74770367b33-lock\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.221511 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzn7k\" (UniqueName: \"kubernetes.io/projected/05220712-c8ae-4ac8-9c49-d74770367b33-kube-api-access-lzn7k\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.251365 5014 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.322398 5014 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.983171 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"05220712-c8ae-4ac8-9c49-d74770367b33","Type":"ContainerDied","Data":"388c1806671b0c63c0b894715eca2c4682ab7a227f6b8b059963f0fa1623aeb4"} Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.983254 5014 scope.go:117] "RemoveContainer" containerID="794ec9c1ac30a70a53bd70e3b117f5a1d66ee9fd5b291b3742bca6c5ce899fad" Oct 06 21:53:34 crc kubenswrapper[5014]: I1006 21:53:34.983307 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.019462 5014 scope.go:117] "RemoveContainer" containerID="46a2145bb6c9090f1616d68d05a57ae9daf788137e9fef57af9e989e14a6f0ac" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.049570 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.058647 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.063136 5014 scope.go:117] "RemoveContainer" containerID="706435a61f829966a3dd81b83578483182dd823c97c44d790d748dae50b96405" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.098158 5014 scope.go:117] "RemoveContainer" containerID="32fd41e8d0ac65e1df8af741b277995fce5511e9e71104405b7ef81473bcf2a0" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.131678 5014 scope.go:117] "RemoveContainer" containerID="8c6ee51bcb2aa47b868de1b54eca4d9f60363bb30fa19275e968859b3dd6b211" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.163020 5014 scope.go:117] "RemoveContainer" containerID="f82e937ea9ed292c5b6b2e0ed118bb02c000e888acec699801c0aef2ac147660" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.192786 5014 scope.go:117] "RemoveContainer" containerID="27a6ef41e95125c0eb0ca5c6f7e55fe49acb2d2f70f72c3b31eab3c0fb6cd5e9" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.224374 5014 scope.go:117] "RemoveContainer" containerID="1f314e90fabf2871f624de1f83d91f5c7a575db546028192458bff82b490aaf1" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.261286 5014 scope.go:117] "RemoveContainer" containerID="2493907335e39e153ca0be098c83d4742c5eba72021fb33f5ee6878e68153c1e" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.294027 5014 scope.go:117] "RemoveContainer" containerID="7cac454e8ca5e7af86acbea681e349fae219a72762c7c9b2d920802f4e900488" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.326936 5014 scope.go:117] "RemoveContainer" containerID="d474f69bb53df946b7ca116226cc152b798653cd694c8d2e13e91ca35d97a083" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.361793 5014 scope.go:117] "RemoveContainer" containerID="cd46a133abb2d0da04ee1bdf5e939ea29e0a1f05c5d2709ffb6fe66c7656b025" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.390764 5014 scope.go:117] "RemoveContainer" containerID="9721677542abcd800ca6abd93a96aae1a47615326a5084a4d6243c91345c2fac" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.429245 5014 scope.go:117] "RemoveContainer" containerID="c89697b2f7e511e4d98946d9a1e15c87f63523c323ed01382b801bdcf2a5fd0e" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.460739 5014 scope.go:117] "RemoveContainer" containerID="6c9c5b52c3ece4980e0a040effefe3ca51032003d2dec58743e81e14eff2bd08" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.503376 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" path="/var/lib/kubelet/pods/05220712-c8ae-4ac8-9c49-d74770367b33/volumes" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.860250 5014 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podc7d1001f-b56b-4d52-88bc-4f23831c3509"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podc7d1001f-b56b-4d52-88bc-4f23831c3509] : Timed out while waiting for systemd to remove kubepods-besteffort-podc7d1001f_b56b_4d52_88bc_4f23831c3509.slice" 
Oct 06 21:53:35 crc kubenswrapper[5014]: E1006 21:53:35.860305 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort podc7d1001f-b56b-4d52-88bc-4f23831c3509] : unable to destroy cgroup paths for cgroup [kubepods besteffort podc7d1001f-b56b-4d52-88bc-4f23831c3509] : Timed out while waiting for systemd to remove kubepods-besteffort-podc7d1001f_b56b_4d52_88bc_4f23831c3509.slice" pod="openstack/ovsdbserver-nb-0" podUID="c7d1001f-b56b-4d52-88bc-4f23831c3509" Oct 06 21:53:35 crc kubenswrapper[5014]: I1006 21:53:35.996006 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 06 21:53:36 crc kubenswrapper[5014]: I1006 21:53:36.049168 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 06 21:53:36 crc kubenswrapper[5014]: I1006 21:53:36.058034 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 06 21:53:37 crc kubenswrapper[5014]: I1006 21:53:37.505788 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7d1001f-b56b-4d52-88bc-4f23831c3509" path="/var/lib/kubelet/pods/c7d1001f-b56b-4d52-88bc-4f23831c3509/volumes" Oct 06 21:53:37 crc kubenswrapper[5014]: I1006 21:53:37.586935 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0ce1b-account-delete-p9nfs" Oct 06 21:53:37 crc kubenswrapper[5014]: I1006 21:53:37.688867 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6mlf\" (UniqueName: \"kubernetes.io/projected/17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43-kube-api-access-b6mlf\") pod \"17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43\" (UID: \"17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43\") " Oct 06 21:53:37 crc kubenswrapper[5014]: I1006 21:53:37.695917 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43-kube-api-access-b6mlf" (OuterVolumeSpecName: "kube-api-access-b6mlf") pod "17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43" (UID: "17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43"). InnerVolumeSpecName "kube-api-access-b6mlf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:53:37 crc kubenswrapper[5014]: I1006 21:53:37.790737 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6mlf\" (UniqueName: \"kubernetes.io/projected/17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43-kube-api-access-b6mlf\") on node \"crc\" DevicePath \"\"" Oct 06 21:53:37 crc kubenswrapper[5014]: I1006 21:53:37.911450 5014 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod78a24140-d3a5-463a-aaf9-49857f14decc"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod78a24140-d3a5-463a-aaf9-49857f14decc] : Timed out while waiting for systemd to remove kubepods-besteffort-pod78a24140_d3a5_463a_aaf9_49857f14decc.slice" Oct 06 21:53:37 crc kubenswrapper[5014]: E1006 21:53:37.911547 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod78a24140-d3a5-463a-aaf9-49857f14decc] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod78a24140-d3a5-463a-aaf9-49857f14decc] : Timed out while waiting for systemd to remove kubepods-besteffort-pod78a24140_d3a5_463a_aaf9_49857f14decc.slice" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" podUID="78a24140-d3a5-463a-aaf9-49857f14decc" Oct 06 21:53:38 crc kubenswrapper[5014]: I1006 21:53:38.026146 5014 generic.go:334] "Generic (PLEG): container finished" podID="17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43" containerID="55b66cf16d95823dc887b2d9d3ca9cadbc7015e7375de93759240008499fa1e9" exitCode=137 Oct 06 21:53:38 crc kubenswrapper[5014]: I1006 21:53:38.026255 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0ce1b-account-delete-p9nfs" event={"ID":"17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43","Type":"ContainerDied","Data":"55b66cf16d95823dc887b2d9d3ca9cadbc7015e7375de93759240008499fa1e9"} Oct 06 21:53:38 crc kubenswrapper[5014]: I1006 21:53:38.026297 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-796f6ffb8f-4rjtg" Oct 06 21:53:38 crc kubenswrapper[5014]: I1006 21:53:38.026338 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0ce1b-account-delete-p9nfs" event={"ID":"17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43","Type":"ContainerDied","Data":"dd97441d420ed9abcbc3cd0ee6fefab8dcd471113b74f34bc46d5c707e6eef6c"} Oct 06 21:53:38 crc kubenswrapper[5014]: I1006 21:53:38.026282 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell0ce1b-account-delete-p9nfs" Oct 06 21:53:38 crc kubenswrapper[5014]: I1006 21:53:38.026363 5014 scope.go:117] "RemoveContainer" containerID="55b66cf16d95823dc887b2d9d3ca9cadbc7015e7375de93759240008499fa1e9" Oct 06 21:53:38 crc kubenswrapper[5014]: I1006 21:53:38.078879 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-796f6ffb8f-4rjtg"] Oct 06 21:53:38 crc kubenswrapper[5014]: I1006 21:53:38.087475 5014 scope.go:117] "RemoveContainer" containerID="55b66cf16d95823dc887b2d9d3ca9cadbc7015e7375de93759240008499fa1e9" Oct 06 21:53:38 crc kubenswrapper[5014]: E1006 21:53:38.088282 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55b66cf16d95823dc887b2d9d3ca9cadbc7015e7375de93759240008499fa1e9\": container with ID starting with 55b66cf16d95823dc887b2d9d3ca9cadbc7015e7375de93759240008499fa1e9 not found: ID does not exist" containerID="55b66cf16d95823dc887b2d9d3ca9cadbc7015e7375de93759240008499fa1e9" Oct 06 21:53:38 crc kubenswrapper[5014]: I1006 21:53:38.088342 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55b66cf16d95823dc887b2d9d3ca9cadbc7015e7375de93759240008499fa1e9"} err="failed to get container status \"55b66cf16d95823dc887b2d9d3ca9cadbc7015e7375de93759240008499fa1e9\": rpc error: code = NotFound desc = could not find container \"55b66cf16d95823dc887b2d9d3ca9cadbc7015e7375de93759240008499fa1e9\": container with ID starting with 55b66cf16d95823dc887b2d9d3ca9cadbc7015e7375de93759240008499fa1e9 not found: ID does not exist" Oct 06 21:53:38 crc kubenswrapper[5014]: I1006 21:53:38.095987 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-796f6ffb8f-4rjtg"] Oct 06 21:53:38 crc kubenswrapper[5014]: I1006 21:53:38.109952 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0ce1b-account-delete-p9nfs"] Oct 06 21:53:38 crc kubenswrapper[5014]: I1006 21:53:38.120296 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell0ce1b-account-delete-p9nfs"] Oct 06 21:53:39 crc kubenswrapper[5014]: I1006 21:53:39.500455 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43" path="/var/lib/kubelet/pods/17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43/volumes" Oct 06 21:53:39 crc kubenswrapper[5014]: I1006 21:53:39.502002 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78a24140-d3a5-463a-aaf9-49857f14decc" path="/var/lib/kubelet/pods/78a24140-d3a5-463a-aaf9-49857f14decc/volumes" Oct 06 21:53:51 crc kubenswrapper[5014]: I1006 21:53:51.735739 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 21:53:51 crc kubenswrapper[5014]: I1006 21:53:51.737770 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 21:53:58 crc kubenswrapper[5014]: I1006 21:53:58.186679 5014 scope.go:117] "RemoveContainer" 
containerID="1251818fc119fa5d31c10f6c1c00d043022d832df9c4ffca7ca60f60fea4e5af" Oct 06 21:54:21 crc kubenswrapper[5014]: I1006 21:54:21.735238 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 21:54:21 crc kubenswrapper[5014]: I1006 21:54:21.735979 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 21:54:51 crc kubenswrapper[5014]: I1006 21:54:51.735797 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 21:54:51 crc kubenswrapper[5014]: I1006 21:54:51.736483 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 21:54:51 crc kubenswrapper[5014]: I1006 21:54:51.736539 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:54:51 crc kubenswrapper[5014]: I1006 21:54:51.737295 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cbdf259ebb7c14fd9e76d97e1e3dfaef9e8de4a47de87f8694acba8d0b7b3bc4"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 06 21:54:51 crc kubenswrapper[5014]: I1006 21:54:51.737365 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://cbdf259ebb7c14fd9e76d97e1e3dfaef9e8de4a47de87f8694acba8d0b7b3bc4" gracePeriod=600 Oct 06 21:54:51 crc kubenswrapper[5014]: I1006 21:54:51.895664 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="cbdf259ebb7c14fd9e76d97e1e3dfaef9e8de4a47de87f8694acba8d0b7b3bc4" exitCode=0 Oct 06 21:54:51 crc kubenswrapper[5014]: I1006 21:54:51.895711 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"cbdf259ebb7c14fd9e76d97e1e3dfaef9e8de4a47de87f8694acba8d0b7b3bc4"} Oct 06 21:54:51 crc kubenswrapper[5014]: I1006 21:54:51.895787 5014 scope.go:117] "RemoveContainer" containerID="26989bcf2919a89c6621b6af8723e17c2272f8a4a0f8b3ba02a253747e518022" Oct 06 21:54:52 crc kubenswrapper[5014]: I1006 21:54:52.909705 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa"} Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.121858 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6j8c5"] Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.122761 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" containerName="mysql-bootstrap" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.122783 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" containerName="mysql-bootstrap" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.122816 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovsdb-server-init" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.122836 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovsdb-server-init" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.122852 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c301c8b-acb9-4008-9832-ce83dc524b6d" containerName="mariadb-account-delete" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.122866 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c301c8b-acb9-4008-9832-ce83dc524b6d" containerName="mariadb-account-delete" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.122891 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06e8dc30-8c95-4585-82e0-fc82de286a1c" containerName="cinder-api-log" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.122902 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="06e8dc30-8c95-4585-82e0-fc82de286a1c" containerName="cinder-api-log" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.122914 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dfeacf8-a072-4b44-bed9-618acd31fb6f" containerName="nova-metadata-metadata" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.122928 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dfeacf8-a072-4b44-bed9-618acd31fb6f" containerName="nova-metadata-metadata" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.122949 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovs-vswitchd" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.122961 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovs-vswitchd" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.122982 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8f59d7d-f71b-46b0-bd32-476a2517a3b6" containerName="rabbitmq" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.122993 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8f59d7d-f71b-46b0-bd32-476a2517a3b6" containerName="rabbitmq" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123010 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="proxy-httpd" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123021 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" 
containerName="proxy-httpd" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123036 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7eeb278b-517f-4b26-825e-12d7d0d969ce" containerName="neutron-api" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123047 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="7eeb278b-517f-4b26-825e-12d7d0d969ce" containerName="neutron-api" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123070 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49c2ecb8-63d7-4275-97ff-7aa899707212" containerName="nova-cell1-novncproxy-novncproxy" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123082 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="49c2ecb8-63d7-4275-97ff-7aa899707212" containerName="nova-cell1-novncproxy-novncproxy" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123105 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b18812d-9eec-4254-8633-b40f55244e47" containerName="mysql-bootstrap" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123117 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b18812d-9eec-4254-8633-b40f55244e47" containerName="mysql-bootstrap" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123140 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" containerName="cinder-scheduler" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123151 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" containerName="cinder-scheduler" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123171 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78a24140-d3a5-463a-aaf9-49857f14decc" containerName="proxy-httpd" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123183 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="78a24140-d3a5-463a-aaf9-49857f14decc" containerName="proxy-httpd" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123207 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-server" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123219 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-server" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123243 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daa5c33b-d941-4030-bf9f-cd6ed831986e" containerName="glance-httpd" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123254 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="daa5c33b-d941-4030-bf9f-cd6ed831986e" containerName="glance-httpd" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123273 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24c4cd4c-297a-45d8-ad6f-e24e53736ecc" containerName="kube-state-metrics" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123286 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="24c4cd4c-297a-45d8-ad6f-e24e53736ecc" containerName="kube-state-metrics" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123307 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78a24140-d3a5-463a-aaf9-49857f14decc" containerName="proxy-server" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123319 5014 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="78a24140-d3a5-463a-aaf9-49857f14decc" containerName="proxy-server" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123341 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-auditor" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123353 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-auditor" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123374 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a3bf50b-cb91-4201-affd-0c42d3585df2" containerName="glance-httpd" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123385 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a3bf50b-cb91-4201-affd-0c42d3585df2" containerName="glance-httpd" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123407 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d674559-08b5-41c9-8783-a5e42504fb3e" containerName="openstack-network-exporter" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123419 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d674559-08b5-41c9-8783-a5e42504fb3e" containerName="openstack-network-exporter" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123432 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ebf215b-a88f-4b08-8f2e-58284b7d4548" containerName="nova-cell0-conductor-conductor" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123444 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ebf215b-a88f-4b08-8f2e-58284b7d4548" containerName="nova-cell0-conductor-conductor" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123460 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d91cde4-29d5-4947-b2ee-73e29ac244c2" containerName="mariadb-account-delete" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123472 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d91cde4-29d5-4947-b2ee-73e29ac244c2" containerName="mariadb-account-delete" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123489 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b18812d-9eec-4254-8633-b40f55244e47" containerName="galera" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123503 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b18812d-9eec-4254-8633-b40f55244e47" containerName="galera" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123519 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="swift-recon-cron" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123532 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="swift-recon-cron" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123556 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7de90fe7-747a-4334-be2a-d3b5ee6b8148" containerName="nova-cell1-conductor-conductor" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123569 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="7de90fe7-747a-4334-be2a-d3b5ee6b8148" containerName="nova-cell1-conductor-conductor" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123583 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c1e8f70-227b-40e0-aceb-470eed382180" containerName="nova-api-log" Oct 06 21:54:55 crc 
kubenswrapper[5014]: I1006 21:54:55.123594 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c1e8f70-227b-40e0-aceb-470eed382180" containerName="nova-api-log" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123613 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="rsync" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123648 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="rsync" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123665 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43" containerName="mariadb-account-delete" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123677 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43" containerName="mariadb-account-delete" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123695 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-reaper" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123713 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-reaper" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123740 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cefbd6b6-3aa3-459d-9f8d-060736f4de92" containerName="nova-scheduler-scheduler" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123755 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="cefbd6b6-3aa3-459d-9f8d-060736f4de92" containerName="nova-scheduler-scheduler" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123781 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="411ad591-8dad-46ef-8a44-88e86f5c86dd" containerName="placement-log" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123795 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="411ad591-8dad-46ef-8a44-88e86f5c86dd" containerName="placement-log" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123823 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-replicator" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123834 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-replicator" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123858 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dfeacf8-a072-4b44-bed9-618acd31fb6f" containerName="nova-metadata-log" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123870 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dfeacf8-a072-4b44-bed9-618acd31fb6f" containerName="nova-metadata-log" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123885 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovsdb-server" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123897 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovsdb-server" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123912 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" 
containerName="container-replicator" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123923 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="container-replicator" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123942 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="044b7af9-01a7-40c1-803c-30e568aaf1fe" containerName="mariadb-account-delete" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.123954 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="044b7af9-01a7-40c1-803c-30e568aaf1fe" containerName="mariadb-account-delete" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.123993 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daa5c33b-d941-4030-bf9f-cd6ed831986e" containerName="glance-log" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124007 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="daa5c33b-d941-4030-bf9f-cd6ed831986e" containerName="glance-log" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124021 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c1e8f70-227b-40e0-aceb-470eed382180" containerName="nova-api-api" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124033 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c1e8f70-227b-40e0-aceb-470eed382180" containerName="nova-api-api" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124048 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-updater" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124060 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-updater" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124082 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bd07ab2-973f-4531-8e5f-68d349e231b4" containerName="barbican-api" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124095 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bd07ab2-973f-4531-8e5f-68d349e231b4" containerName="barbican-api" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124113 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b977fc8-6c11-41e6-9500-f0da2d66aea1" containerName="setup-container" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124125 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b977fc8-6c11-41e6-9500-f0da2d66aea1" containerName="setup-container" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124142 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" containerName="openstack-network-exporter" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124155 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" containerName="openstack-network-exporter" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124174 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13fe806f-0e4e-4ea7-838b-938c5fe74c99" containerName="barbican-keystone-listener" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124186 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="13fe806f-0e4e-4ea7-838b-938c5fe74c99" containerName="barbican-keystone-listener" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124206 5014 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="0bd07ab2-973f-4531-8e5f-68d349e231b4" containerName="barbican-api-log" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124218 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bd07ab2-973f-4531-8e5f-68d349e231b4" containerName="barbican-api-log" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124240 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7d1001f-b56b-4d52-88bc-4f23831c3509" containerName="openstack-network-exporter" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124252 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7d1001f-b56b-4d52-88bc-4f23831c3509" containerName="openstack-network-exporter" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124269 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" containerName="probe" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124280 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" containerName="probe" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124298 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13fe806f-0e4e-4ea7-838b-938c5fe74c99" containerName="barbican-keystone-listener-log" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124310 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="13fe806f-0e4e-4ea7-838b-938c5fe74c99" containerName="barbican-keystone-listener-log" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124330 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="411ad591-8dad-46ef-8a44-88e86f5c86dd" containerName="placement-api" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124343 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="411ad591-8dad-46ef-8a44-88e86f5c86dd" containerName="placement-api" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124360 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" containerName="ovn-northd" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124371 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" containerName="ovn-northd" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124387 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74db136d-3445-4a7e-bcae-4645888ec806" containerName="ovn-controller" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124399 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="74db136d-3445-4a7e-bcae-4645888ec806" containerName="ovn-controller" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124414 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1132a0d0-bc9b-430d-a89e-33455c763b3c" containerName="openstack-network-exporter" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124426 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="1132a0d0-bc9b-430d-a89e-33455c763b3c" containerName="openstack-network-exporter" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124441 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="container-server" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124453 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="container-server" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 
21:54:55.124468 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06e8dc30-8c95-4585-82e0-fc82de286a1c" containerName="cinder-api" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124479 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="06e8dc30-8c95-4585-82e0-fc82de286a1c" containerName="cinder-api" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124502 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fccac7f9-eeaa-4481-ab49-9e71dd8af79c" containerName="keystone-api" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124513 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="fccac7f9-eeaa-4481-ab49-9e71dd8af79c" containerName="keystone-api" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124529 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" containerName="barbican-worker" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124543 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" containerName="barbican-worker" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124568 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff29a0b3-1307-4fdb-bead-68d87f2f2923" containerName="init" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124583 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff29a0b3-1307-4fdb-bead-68d87f2f2923" containerName="init" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.124605 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff29a0b3-1307-4fdb-bead-68d87f2f2923" containerName="dnsmasq-dns" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.124979 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff29a0b3-1307-4fdb-bead-68d87f2f2923" containerName="dnsmasq-dns" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125006 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="ceilometer-central-agent" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125023 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="ceilometer-central-agent" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125040 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c" containerName="mariadb-account-delete" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125075 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c" containerName="mariadb-account-delete" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125096 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7eeb278b-517f-4b26-825e-12d7d0d969ce" containerName="neutron-httpd" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125111 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="7eeb278b-517f-4b26-825e-12d7d0d969ce" containerName="neutron-httpd" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125136 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="ceilometer-notification-agent" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125151 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="ceilometer-notification-agent" Oct 06 21:54:55 
crc kubenswrapper[5014]: E1006 21:54:55.125179 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd16c866-91b0-4261-a084-7a96ac597c04" containerName="memcached" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125194 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd16c866-91b0-4261-a084-7a96ac597c04" containerName="memcached" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125217 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="sg-core" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125234 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="sg-core" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125258 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" containerName="barbican-worker-log" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125272 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" containerName="barbican-worker-log" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125290 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349" containerName="mariadb-account-delete" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125302 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349" containerName="mariadb-account-delete" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125316 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7d1001f-b56b-4d52-88bc-4f23831c3509" containerName="ovsdbserver-nb" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125328 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7d1001f-b56b-4d52-88bc-4f23831c3509" containerName="ovsdbserver-nb" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125347 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" containerName="galera" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125359 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" containerName="galera" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125375 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-server" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125387 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-server" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125410 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-replicator" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125422 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-replicator" Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125437 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-auditor" Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125448 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-auditor" Oct 06 21:54:55 crc 
kubenswrapper[5014]: E1006 21:54:55.125462 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a3bf50b-cb91-4201-affd-0c42d3585df2" containerName="glance-log"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125474 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a3bf50b-cb91-4201-affd-0c42d3585df2" containerName="glance-log"
Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125493 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1132a0d0-bc9b-430d-a89e-33455c763b3c" containerName="ovsdbserver-sb"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125505 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="1132a0d0-bc9b-430d-a89e-33455c763b3c" containerName="ovsdbserver-sb"
Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125525 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-expirer"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125537 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-expirer"
Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125557 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ca96007-c66c-4e95-84b7-12eff893cca2" containerName="mariadb-account-delete"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125569 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ca96007-c66c-4e95-84b7-12eff893cca2" containerName="mariadb-account-delete"
Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125586 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8f59d7d-f71b-46b0-bd32-476a2517a3b6" containerName="setup-container"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125641 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8f59d7d-f71b-46b0-bd32-476a2517a3b6" containerName="setup-container"
Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125659 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b977fc8-6c11-41e6-9500-f0da2d66aea1" containerName="rabbitmq"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125671 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b977fc8-6c11-41e6-9500-f0da2d66aea1" containerName="rabbitmq"
Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125693 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="container-auditor"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125706 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="container-auditor"
Oct 06 21:54:55 crc kubenswrapper[5014]: E1006 21:54:55.125724 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="container-updater"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.125736 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="container-updater"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126022 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="2439cd4a-a6a0-4b85-b4d5-3952b61af5bd" containerName="galera"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126040 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="fccac7f9-eeaa-4481-ab49-9e71dd8af79c" containerName="keystone-api"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126063 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ca96007-c66c-4e95-84b7-12eff893cca2" containerName="mariadb-account-delete"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126081 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-auditor"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126104 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="container-replicator"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126116 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="49c2ecb8-63d7-4275-97ff-7aa899707212" containerName="nova-cell1-novncproxy-novncproxy"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126139 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b18812d-9eec-4254-8633-b40f55244e47" containerName="galera"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126155 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-replicator"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126176 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7d1001f-b56b-4d52-88bc-4f23831c3509" containerName="ovsdbserver-nb"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126191 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bd07ab2-973f-4531-8e5f-68d349e231b4" containerName="barbican-api-log"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126205 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-reaper"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126224 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" containerName="probe"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126240 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="17ac8bc9-35ff-43b9-89fa-ed9e3ff70e43" containerName="mariadb-account-delete"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126254 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8f59d7d-f71b-46b0-bd32-476a2517a3b6" containerName="rabbitmq"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126269 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="06e8dc30-8c95-4585-82e0-fc82de286a1c" containerName="cinder-api"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126286 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-auditor"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126305 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d91cde4-29d5-4947-b2ee-73e29ac244c2" containerName="mariadb-account-delete"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126323 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c1e8f70-227b-40e0-aceb-470eed382180" containerName="nova-api-api"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126339 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" containerName="barbican-worker"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126351 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" containerName="ovn-northd"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126367 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="13fe806f-0e4e-4ea7-838b-938c5fe74c99" containerName="barbican-keystone-listener-log"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126381 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0ab5ea1-2549-4d17-8758-bfe2ad6e4b1c" containerName="mariadb-account-delete"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126397 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b977fc8-6c11-41e6-9500-f0da2d66aea1" containerName="rabbitmq"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126410 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="24c4cd4c-297a-45d8-ad6f-e24e53736ecc" containerName="kube-state-metrics"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126424 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7d1001f-b56b-4d52-88bc-4f23831c3509" containerName="openstack-network-exporter"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126489 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="dab38488-ecfd-45ec-bb75-5e3d5bdd42e7" containerName="openstack-network-exporter"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126503 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="container-updater"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126523 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="56ddac83-2b9f-4a7d-a9c9-02de44e0ab00" containerName="barbican-worker-log"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126539 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovsdb-server"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126556 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="ceilometer-central-agent"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126578 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="container-server"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126599 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="cefbd6b6-3aa3-459d-9f8d-060736f4de92" containerName="nova-scheduler-scheduler"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126643 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-replicator"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126660 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="daa5c33b-d941-4030-bf9f-cd6ed831986e" containerName="glance-log"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126674 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="account-server"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126690 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ebf215b-a88f-4b08-8f2e-58284b7d4548" containerName="nova-cell0-conductor-conductor"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126710 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="ceilometer-notification-agent"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126728 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="02f3cb70-5f2f-43fe-bf3b-892f5e76a1cf" containerName="cinder-scheduler"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126746 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-expirer"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126762 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="proxy-httpd"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126783 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a3bf50b-cb91-4201-affd-0c42d3585df2" containerName="glance-httpd"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126799 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff29a0b3-1307-4fdb-bead-68d87f2f2923" containerName="dnsmasq-dns"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126822 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c1e8f70-227b-40e0-aceb-470eed382180" containerName="nova-api-log"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126840 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="13fe806f-0e4e-4ea7-838b-938c5fe74c99" containerName="barbican-keystone-listener"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126859 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-updater"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126874 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="411ad591-8dad-46ef-8a44-88e86f5c86dd" containerName="placement-api"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126900 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a3bf50b-cb91-4201-affd-0c42d3585df2" containerName="glance-log"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126921 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="78a24140-d3a5-463a-aaf9-49857f14decc" containerName="proxy-server"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126943 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d674559-08b5-41c9-8783-a5e42504fb3e" containerName="openstack-network-exporter"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126968 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="object-server"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.126990 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="06e8dc30-8c95-4585-82e0-fc82de286a1c" containerName="cinder-api-log"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127008 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="7de90fe7-747a-4334-be2a-d3b5ee6b8148" containerName="nova-cell1-conductor-conductor"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127031 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="container-auditor"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127054 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="1132a0d0-bc9b-430d-a89e-33455c763b3c" containerName="openstack-network-exporter"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127082 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="78a24140-d3a5-463a-aaf9-49857f14decc" containerName="proxy-httpd"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127101 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="rsync"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127116 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="05220712-c8ae-4ac8-9c49-d74770367b33" containerName="swift-recon-cron"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127135 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="daa5c33b-d941-4030-bf9f-cd6ed831986e" containerName="glance-httpd"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127154 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="32e89ff3-0d60-4fd1-9c0d-831b92311165" containerName="sg-core"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127171 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="74db136d-3445-4a7e-bcae-4645888ec806" containerName="ovn-controller"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127199 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd16c866-91b0-4261-a084-7a96ac597c04" containerName="memcached"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127221 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="1dfeacf8-a072-4b44-bed9-618acd31fb6f" containerName="nova-metadata-log"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127244 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="1132a0d0-bc9b-430d-a89e-33455c763b3c" containerName="ovsdbserver-sb"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127268 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="1dfeacf8-a072-4b44-bed9-618acd31fb6f" containerName="nova-metadata-metadata"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127288 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c301c8b-acb9-4008-9832-ce83dc524b6d" containerName="mariadb-account-delete"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127313 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cb0de42-0a4d-4fbf-bcbb-5f46c46cb345" containerName="ovs-vswitchd"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127334 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bd07ab2-973f-4531-8e5f-68d349e231b4" containerName="barbican-api"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127353 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="044b7af9-01a7-40c1-803c-30e568aaf1fe" containerName="mariadb-account-delete"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127375 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="7eeb278b-517f-4b26-825e-12d7d0d969ce" containerName="neutron-httpd"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127402 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="411ad591-8dad-46ef-8a44-88e86f5c86dd" containerName="placement-log"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127423 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="7eeb278b-517f-4b26-825e-12d7d0d969ce" containerName="neutron-api"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.127435 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2bef7e3-d6fe-48f7-9ec5-a9c89f0e1349" containerName="mariadb-account-delete"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.129920 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.155584 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6j8c5"]
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.298564 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-catalog-content\") pod \"redhat-marketplace-6j8c5\" (UID: \"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0\") " pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.299008 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dbsr\" (UniqueName: \"kubernetes.io/projected/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-kube-api-access-7dbsr\") pod \"redhat-marketplace-6j8c5\" (UID: \"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0\") " pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.299096 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-utilities\") pod \"redhat-marketplace-6j8c5\" (UID: \"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0\") " pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.400719 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-catalog-content\") pod \"redhat-marketplace-6j8c5\" (UID: \"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0\") " pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.400812 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dbsr\" (UniqueName: \"kubernetes.io/projected/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-kube-api-access-7dbsr\") pod \"redhat-marketplace-6j8c5\" (UID: \"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0\") " pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.401347 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-utilities\") pod \"redhat-marketplace-6j8c5\" (UID: \"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0\") " pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.401530 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-catalog-content\") pod \"redhat-marketplace-6j8c5\" (UID: \"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0\") " pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.401733 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-utilities\") pod \"redhat-marketplace-6j8c5\" (UID: \"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0\") " pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.438147 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dbsr\" (UniqueName: \"kubernetes.io/projected/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-kube-api-access-7dbsr\") pod \"redhat-marketplace-6j8c5\" (UID: \"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0\") " pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.470598 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.907355 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6j8c5"]
Oct 06 21:54:55 crc kubenswrapper[5014]: W1006 21:54:55.915240 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8dd18544_8a0e_4220_80bf_0f38cb7ee5a0.slice/crio-7c917f986c777af0ced0bede78e83ea730c3cdfb2e04e33d35914017d899c738 WatchSource:0}: Error finding container 7c917f986c777af0ced0bede78e83ea730c3cdfb2e04e33d35914017d899c738: Status 404 returned error can't find the container with id 7c917f986c777af0ced0bede78e83ea730c3cdfb2e04e33d35914017d899c738
Oct 06 21:54:55 crc kubenswrapper[5014]: I1006 21:54:55.935274 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6j8c5" event={"ID":"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0","Type":"ContainerStarted","Data":"7c917f986c777af0ced0bede78e83ea730c3cdfb2e04e33d35914017d899c738"}
Oct 06 21:54:56 crc kubenswrapper[5014]: I1006 21:54:56.949204 5014 generic.go:334] "Generic (PLEG): container finished" podID="8dd18544-8a0e-4220-80bf-0f38cb7ee5a0" containerID="54f102f1d64526e0f40c6edb9ad809a0e415c987dfe1692164da280e78c70111" exitCode=0
Oct 06 21:54:56 crc kubenswrapper[5014]: I1006 21:54:56.949277 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6j8c5" event={"ID":"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0","Type":"ContainerDied","Data":"54f102f1d64526e0f40c6edb9ad809a0e415c987dfe1692164da280e78c70111"}
Oct 06 21:54:56 crc kubenswrapper[5014]: I1006 21:54:56.952491 5014 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 06 21:54:57 crc kubenswrapper[5014]: I1006 21:54:57.694154 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pdm24"]
Oct 06 21:54:57 crc kubenswrapper[5014]: I1006 21:54:57.702847 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:54:57 crc kubenswrapper[5014]: I1006 21:54:57.712797 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pdm24"]
Oct 06 21:54:57 crc kubenswrapper[5014]: I1006 21:54:57.838639 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb825f11-f941-48db-af14-b2598f0082f4-catalog-content\") pod \"certified-operators-pdm24\" (UID: \"eb825f11-f941-48db-af14-b2598f0082f4\") " pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:54:57 crc kubenswrapper[5014]: I1006 21:54:57.838689 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb825f11-f941-48db-af14-b2598f0082f4-utilities\") pod \"certified-operators-pdm24\" (UID: \"eb825f11-f941-48db-af14-b2598f0082f4\") " pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:54:57 crc kubenswrapper[5014]: I1006 21:54:57.838886 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8q2x4\" (UniqueName: \"kubernetes.io/projected/eb825f11-f941-48db-af14-b2598f0082f4-kube-api-access-8q2x4\") pod \"certified-operators-pdm24\" (UID: \"eb825f11-f941-48db-af14-b2598f0082f4\") " pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:54:57 crc kubenswrapper[5014]: I1006 21:54:57.940688 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb825f11-f941-48db-af14-b2598f0082f4-catalog-content\") pod \"certified-operators-pdm24\" (UID: \"eb825f11-f941-48db-af14-b2598f0082f4\") " pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:54:57 crc kubenswrapper[5014]: I1006 21:54:57.941020 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb825f11-f941-48db-af14-b2598f0082f4-utilities\") pod \"certified-operators-pdm24\" (UID: \"eb825f11-f941-48db-af14-b2598f0082f4\") " pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:54:57 crc kubenswrapper[5014]: I1006 21:54:57.941070 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8q2x4\" (UniqueName: \"kubernetes.io/projected/eb825f11-f941-48db-af14-b2598f0082f4-kube-api-access-8q2x4\") pod \"certified-operators-pdm24\" (UID: \"eb825f11-f941-48db-af14-b2598f0082f4\") " pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:54:57 crc kubenswrapper[5014]: I1006 21:54:57.941195 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb825f11-f941-48db-af14-b2598f0082f4-catalog-content\") pod \"certified-operators-pdm24\" (UID: \"eb825f11-f941-48db-af14-b2598f0082f4\") " pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:54:57 crc kubenswrapper[5014]: I1006 21:54:57.941521 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb825f11-f941-48db-af14-b2598f0082f4-utilities\") pod \"certified-operators-pdm24\" (UID: \"eb825f11-f941-48db-af14-b2598f0082f4\") " pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:54:57 crc kubenswrapper[5014]: I1006 21:54:57.963242 5014 generic.go:334] "Generic (PLEG): container finished" podID="8dd18544-8a0e-4220-80bf-0f38cb7ee5a0" containerID="bd819870fde505a3fcbeaada67d1c4a83298379265f22cfa7673ae50fc725b3c" exitCode=0
Oct 06 21:54:57 crc kubenswrapper[5014]: I1006 21:54:57.963288 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6j8c5" event={"ID":"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0","Type":"ContainerDied","Data":"bd819870fde505a3fcbeaada67d1c4a83298379265f22cfa7673ae50fc725b3c"}
Oct 06 21:54:57 crc kubenswrapper[5014]: I1006 21:54:57.977104 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8q2x4\" (UniqueName: \"kubernetes.io/projected/eb825f11-f941-48db-af14-b2598f0082f4-kube-api-access-8q2x4\") pod \"certified-operators-pdm24\" (UID: \"eb825f11-f941-48db-af14-b2598f0082f4\") " pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:54:58 crc kubenswrapper[5014]: I1006 21:54:58.022867 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:54:58 crc kubenswrapper[5014]: I1006 21:54:58.348631 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pdm24"]
Oct 06 21:54:58 crc kubenswrapper[5014]: I1006 21:54:58.978842 5014 generic.go:334] "Generic (PLEG): container finished" podID="eb825f11-f941-48db-af14-b2598f0082f4" containerID="de5468a787adfe91291f0c8987bad47b369c2d502b68602b2fe7ce7f83d72937" exitCode=0
Oct 06 21:54:58 crc kubenswrapper[5014]: I1006 21:54:58.978964 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdm24" event={"ID":"eb825f11-f941-48db-af14-b2598f0082f4","Type":"ContainerDied","Data":"de5468a787adfe91291f0c8987bad47b369c2d502b68602b2fe7ce7f83d72937"}
Oct 06 21:54:58 crc kubenswrapper[5014]: I1006 21:54:58.979015 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdm24" event={"ID":"eb825f11-f941-48db-af14-b2598f0082f4","Type":"ContainerStarted","Data":"eddb29ce4f6bcb052a3050f19b07231e5c6d9ec4974e805a2393022d75b77950"}
Oct 06 21:54:58 crc kubenswrapper[5014]: I1006 21:54:58.985254 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6j8c5" event={"ID":"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0","Type":"ContainerStarted","Data":"9b015c6b762d11b0e4e834615c22dffc7c209c48a61dc11ceb0329094597dd4c"}
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.031598 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6j8c5" podStartSLOduration=2.601608818 podStartE2EDuration="4.031571173s" podCreationTimestamp="2025-10-06 21:54:55 +0000 UTC" firstStartedPulling="2025-10-06 21:54:56.952052175 +0000 UTC m=+1442.245088939" lastFinishedPulling="2025-10-06 21:54:58.38201456 +0000 UTC m=+1443.675051294" observedRunningTime="2025-10-06 21:54:59.023928172 +0000 UTC m=+1444.316964906" watchObservedRunningTime="2025-10-06 21:54:59.031571173 +0000 UTC m=+1444.324607937"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.153288 5014 scope.go:117] "RemoveContainer" containerID="1959b77a570f91cdaf5697d5f0178928de8cdf98024f0dde0600e72f93b3a946"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.176887 5014 scope.go:117] "RemoveContainer" containerID="a0b16ec100c37d0a7dab766417c9bd17c325284f3a24595bd52dfb7036ee7536"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.198166 5014 scope.go:117] "RemoveContainer" containerID="bf464222d39ef12e38a56f26482daae9287d8c9042a0d329a039052b127e923d"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.224397 5014 scope.go:117] "RemoveContainer" containerID="2eaef8680c09830198d88643a8f0a39fc12acfe60502da9bd0cbfac3d647db18"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.273446 5014 scope.go:117] "RemoveContainer" containerID="eccd817ee0632125e243a540a43507cb9e89329aba9b2105894b28f4cc60d162"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.302463 5014 scope.go:117] "RemoveContainer" containerID="59c2238484a6521933fa9031cd9b5dcc70ad7ec740e5a0296966fba0fa91301e"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.332865 5014 scope.go:117] "RemoveContainer" containerID="75260f5fd71272b8cf58b4b12873343b77691304e9712b86843406280124f279"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.352827 5014 scope.go:117] "RemoveContainer" containerID="034cf2f15c26b5ad5bd392b82b653d9607968a06ea6a5acdbdf60bb484c7ddfd"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.369706 5014 scope.go:117] "RemoveContainer" containerID="03501a4428cc493bd8ed16ab19651e2cdf11e53ac3b24caf94af764503a3b0a1"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.388277 5014 scope.go:117] "RemoveContainer" containerID="1d44e59a2f427f4344ed685e43320717bbfc68cd82e91134294275a2865561e9"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.423159 5014 scope.go:117] "RemoveContainer" containerID="f692dfcc2492eae5404168857917e870a27a027006ac51db96b1400629f12e89"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.442303 5014 scope.go:117] "RemoveContainer" containerID="40dcdb5d7c50cb4d2ec51f4ef5316c1e1fba2901ad8cf7db90116b6c873b152a"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.468222 5014 scope.go:117] "RemoveContainer" containerID="db7cc9b68ef3b4b78479e40f31e675c8f6b9a9f82735c43dd4837eaf2ba328d2"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.502280 5014 scope.go:117] "RemoveContainer" containerID="a65f05bfe49a06b41ce8f1ed83aafd17d0fda384fbadf3ce3c9e2d53641f8290"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.521089 5014 scope.go:117] "RemoveContainer" containerID="b1ed3d8475b8964f1ec03d5f52097548f4dc541c1ee4d2d12bb0acf7fc254d27"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.541195 5014 scope.go:117] "RemoveContainer" containerID="1dc17faa00611299d42adc11ce138f3ba8363c30f35a27bbcfbaadfdf3a44df1"
Oct 06 21:54:59 crc kubenswrapper[5014]: I1006 21:54:59.566991 5014 scope.go:117] "RemoveContainer" containerID="f45765ebde0f9dc12d53e796e701e54e1c7970b480a74ce9768eb092f10479b4"
Oct 06 21:55:01 crc kubenswrapper[5014]: I1006 21:55:01.012548 5014 generic.go:334] "Generic (PLEG): container finished" podID="eb825f11-f941-48db-af14-b2598f0082f4" containerID="72b59417df5450e110dd864cf097ded895a1d0b25d4ab92fcd4aaf0fdfd075da" exitCode=0
Oct 06 21:55:01 crc kubenswrapper[5014]: I1006 21:55:01.012651 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdm24" event={"ID":"eb825f11-f941-48db-af14-b2598f0082f4","Type":"ContainerDied","Data":"72b59417df5450e110dd864cf097ded895a1d0b25d4ab92fcd4aaf0fdfd075da"}
Oct 06 21:55:02 crc kubenswrapper[5014]: I1006 21:55:02.022749 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdm24" event={"ID":"eb825f11-f941-48db-af14-b2598f0082f4","Type":"ContainerStarted","Data":"6eff39a5e02da55c53dfeacaa9fa197028b1724df296d1881113eeeef0ff642f"}
Oct 06 21:55:02 crc kubenswrapper[5014]: I1006 21:55:02.044371 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pdm24" podStartSLOduration=2.532561298 podStartE2EDuration="5.044351957s" podCreationTimestamp="2025-10-06 21:54:57 +0000 UTC" firstStartedPulling="2025-10-06 21:54:58.981810276 +0000 UTC m=+1444.274847020" lastFinishedPulling="2025-10-06 21:55:01.493600945 +0000 UTC m=+1446.786637679" observedRunningTime="2025-10-06 21:55:02.04191558 +0000 UTC m=+1447.334952324" watchObservedRunningTime="2025-10-06 21:55:02.044351957 +0000 UTC m=+1447.337388691"
Oct 06 21:55:05 crc kubenswrapper[5014]: I1006 21:55:05.471714 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:55:05 crc kubenswrapper[5014]: I1006 21:55:05.472245 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:55:05 crc kubenswrapper[5014]: I1006 21:55:05.545203 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:55:06 crc kubenswrapper[5014]: I1006 21:55:06.097922 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:55:06 crc kubenswrapper[5014]: I1006 21:55:06.147752 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6j8c5"]
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.023516 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.023974 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.077201 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6j8c5" podUID="8dd18544-8a0e-4220-80bf-0f38cb7ee5a0" containerName="registry-server" containerID="cri-o://9b015c6b762d11b0e4e834615c22dffc7c209c48a61dc11ceb0329094597dd4c" gracePeriod=2
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.086231 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.145257 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.200803 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-p2fzw"]
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.202147 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.237484 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-p2fzw"]
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.293968 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctqmf\" (UniqueName: \"kubernetes.io/projected/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-kube-api-access-ctqmf\") pod \"community-operators-p2fzw\" (UID: \"083fdf75-9ab4-4b9f-9a29-e905459ba7cf\") " pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.294114 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-catalog-content\") pod \"community-operators-p2fzw\" (UID: \"083fdf75-9ab4-4b9f-9a29-e905459ba7cf\") " pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.294203 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-utilities\") pod \"community-operators-p2fzw\" (UID: \"083fdf75-9ab4-4b9f-9a29-e905459ba7cf\") " pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.395284 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctqmf\" (UniqueName: \"kubernetes.io/projected/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-kube-api-access-ctqmf\") pod \"community-operators-p2fzw\" (UID: \"083fdf75-9ab4-4b9f-9a29-e905459ba7cf\") " pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.395659 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-catalog-content\") pod \"community-operators-p2fzw\" (UID: \"083fdf75-9ab4-4b9f-9a29-e905459ba7cf\") " pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.395695 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-utilities\") pod \"community-operators-p2fzw\" (UID: \"083fdf75-9ab4-4b9f-9a29-e905459ba7cf\") " pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.396060 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-utilities\") pod \"community-operators-p2fzw\" (UID: \"083fdf75-9ab4-4b9f-9a29-e905459ba7cf\") " pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.396258 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-catalog-content\") pod \"community-operators-p2fzw\" (UID: \"083fdf75-9ab4-4b9f-9a29-e905459ba7cf\") " pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.420142 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctqmf\" (UniqueName: \"kubernetes.io/projected/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-kube-api-access-ctqmf\") pod \"community-operators-p2fzw\" (UID: \"083fdf75-9ab4-4b9f-9a29-e905459ba7cf\") " pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.536381 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.561241 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.702190 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-catalog-content\") pod \"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0\" (UID: \"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0\") "
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.702232 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-utilities\") pod \"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0\" (UID: \"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0\") "
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.702281 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7dbsr\" (UniqueName: \"kubernetes.io/projected/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-kube-api-access-7dbsr\") pod \"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0\" (UID: \"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0\") "
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.704579 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-utilities" (OuterVolumeSpecName: "utilities") pod "8dd18544-8a0e-4220-80bf-0f38cb7ee5a0" (UID: "8dd18544-8a0e-4220-80bf-0f38cb7ee5a0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.710282 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-kube-api-access-7dbsr" (OuterVolumeSpecName: "kube-api-access-7dbsr") pod "8dd18544-8a0e-4220-80bf-0f38cb7ee5a0" (UID: "8dd18544-8a0e-4220-80bf-0f38cb7ee5a0"). InnerVolumeSpecName "kube-api-access-7dbsr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.728558 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8dd18544-8a0e-4220-80bf-0f38cb7ee5a0" (UID: "8dd18544-8a0e-4220-80bf-0f38cb7ee5a0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.804215 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.804284 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.804298 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7dbsr\" (UniqueName: \"kubernetes.io/projected/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0-kube-api-access-7dbsr\") on node \"crc\" DevicePath \"\""
Oct 06 21:55:08 crc kubenswrapper[5014]: W1006 21:55:08.851180 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod083fdf75_9ab4_4b9f_9a29_e905459ba7cf.slice/crio-0f73c9652bcc7130669e7a6490fe60b035436b24feaf4fadd7766821022a3f43 WatchSource:0}: Error finding container 0f73c9652bcc7130669e7a6490fe60b035436b24feaf4fadd7766821022a3f43: Status 404 returned error can't find the container with id 0f73c9652bcc7130669e7a6490fe60b035436b24feaf4fadd7766821022a3f43
Oct 06 21:55:08 crc kubenswrapper[5014]: I1006 21:55:08.858657 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-p2fzw"]
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.088857 5014 generic.go:334] "Generic (PLEG): container finished" podID="8dd18544-8a0e-4220-80bf-0f38cb7ee5a0" containerID="9b015c6b762d11b0e4e834615c22dffc7c209c48a61dc11ceb0329094597dd4c" exitCode=0
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.088967 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6j8c5"
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.088977 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6j8c5" event={"ID":"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0","Type":"ContainerDied","Data":"9b015c6b762d11b0e4e834615c22dffc7c209c48a61dc11ceb0329094597dd4c"}
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.089306 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6j8c5" event={"ID":"8dd18544-8a0e-4220-80bf-0f38cb7ee5a0","Type":"ContainerDied","Data":"7c917f986c777af0ced0bede78e83ea730c3cdfb2e04e33d35914017d899c738"}
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.089333 5014 scope.go:117] "RemoveContainer" containerID="9b015c6b762d11b0e4e834615c22dffc7c209c48a61dc11ceb0329094597dd4c"
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.091126 5014 generic.go:334] "Generic (PLEG): container finished" podID="083fdf75-9ab4-4b9f-9a29-e905459ba7cf" containerID="9cb8f8ad243dea60554e716e032a69bd9b38113a21baee8aab37200fe7afbf43" exitCode=0
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.091188 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p2fzw" event={"ID":"083fdf75-9ab4-4b9f-9a29-e905459ba7cf","Type":"ContainerDied","Data":"9cb8f8ad243dea60554e716e032a69bd9b38113a21baee8aab37200fe7afbf43"}
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.091218 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p2fzw" event={"ID":"083fdf75-9ab4-4b9f-9a29-e905459ba7cf","Type":"ContainerStarted","Data":"0f73c9652bcc7130669e7a6490fe60b035436b24feaf4fadd7766821022a3f43"}
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.107588 5014 scope.go:117] "RemoveContainer" containerID="bd819870fde505a3fcbeaada67d1c4a83298379265f22cfa7673ae50fc725b3c"
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.126921 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6j8c5"]
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.135965 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6j8c5"]
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.142742 5014 scope.go:117] "RemoveContainer" containerID="54f102f1d64526e0f40c6edb9ad809a0e415c987dfe1692164da280e78c70111"
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.160719 5014 scope.go:117] "RemoveContainer" containerID="9b015c6b762d11b0e4e834615c22dffc7c209c48a61dc11ceb0329094597dd4c"
Oct 06 21:55:09 crc kubenswrapper[5014]: E1006 21:55:09.161228 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b015c6b762d11b0e4e834615c22dffc7c209c48a61dc11ceb0329094597dd4c\": container with ID starting with 9b015c6b762d11b0e4e834615c22dffc7c209c48a61dc11ceb0329094597dd4c not found: ID does not exist" containerID="9b015c6b762d11b0e4e834615c22dffc7c209c48a61dc11ceb0329094597dd4c"
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.161278 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b015c6b762d11b0e4e834615c22dffc7c209c48a61dc11ceb0329094597dd4c"} err="failed to get container status \"9b015c6b762d11b0e4e834615c22dffc7c209c48a61dc11ceb0329094597dd4c\": rpc error: code = NotFound desc = could not find container \"9b015c6b762d11b0e4e834615c22dffc7c209c48a61dc11ceb0329094597dd4c\": container with ID starting with 9b015c6b762d11b0e4e834615c22dffc7c209c48a61dc11ceb0329094597dd4c not found: ID does not exist"
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.161312 5014 scope.go:117] "RemoveContainer" containerID="bd819870fde505a3fcbeaada67d1c4a83298379265f22cfa7673ae50fc725b3c"
Oct 06 21:55:09 crc kubenswrapper[5014]: E1006 21:55:09.161539 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd819870fde505a3fcbeaada67d1c4a83298379265f22cfa7673ae50fc725b3c\": container with ID starting with bd819870fde505a3fcbeaada67d1c4a83298379265f22cfa7673ae50fc725b3c not found: ID does not exist" containerID="bd819870fde505a3fcbeaada67d1c4a83298379265f22cfa7673ae50fc725b3c"
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.161564 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd819870fde505a3fcbeaada67d1c4a83298379265f22cfa7673ae50fc725b3c"} err="failed to get container status \"bd819870fde505a3fcbeaada67d1c4a83298379265f22cfa7673ae50fc725b3c\": rpc error: code = NotFound desc = could not find container \"bd819870fde505a3fcbeaada67d1c4a83298379265f22cfa7673ae50fc725b3c\": container with ID starting with bd819870fde505a3fcbeaada67d1c4a83298379265f22cfa7673ae50fc725b3c not found: ID does not exist"
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.161581 5014 scope.go:117] "RemoveContainer" containerID="54f102f1d64526e0f40c6edb9ad809a0e415c987dfe1692164da280e78c70111"
Oct 06 21:55:09 crc kubenswrapper[5014]: E1006 21:55:09.161799 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54f102f1d64526e0f40c6edb9ad809a0e415c987dfe1692164da280e78c70111\": container with ID starting with 54f102f1d64526e0f40c6edb9ad809a0e415c987dfe1692164da280e78c70111 not found: ID does not exist" containerID="54f102f1d64526e0f40c6edb9ad809a0e415c987dfe1692164da280e78c70111"
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.161826 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54f102f1d64526e0f40c6edb9ad809a0e415c987dfe1692164da280e78c70111"} err="failed to get container status \"54f102f1d64526e0f40c6edb9ad809a0e415c987dfe1692164da280e78c70111\": rpc error: code = NotFound desc = could not find container \"54f102f1d64526e0f40c6edb9ad809a0e415c987dfe1692164da280e78c70111\": container with ID starting with 54f102f1d64526e0f40c6edb9ad809a0e415c987dfe1692164da280e78c70111 not found: ID does not exist"
Oct 06 21:55:09 crc kubenswrapper[5014]: I1006 21:55:09.501484 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8dd18544-8a0e-4220-80bf-0f38cb7ee5a0" path="/var/lib/kubelet/pods/8dd18544-8a0e-4220-80bf-0f38cb7ee5a0/volumes"
Oct 06 21:55:10 crc kubenswrapper[5014]: I1006 21:55:10.394449 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pdm24"]
Oct 06 21:55:10 crc kubenswrapper[5014]: I1006 21:55:10.395216 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pdm24" podUID="eb825f11-f941-48db-af14-b2598f0082f4" containerName="registry-server" containerID="cri-o://6eff39a5e02da55c53dfeacaa9fa197028b1724df296d1881113eeeef0ff642f" gracePeriod=2
Oct 06 21:55:10 crc kubenswrapper[5014]: I1006 21:55:10.890382 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.046515 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb825f11-f941-48db-af14-b2598f0082f4-utilities\") pod \"eb825f11-f941-48db-af14-b2598f0082f4\" (UID: \"eb825f11-f941-48db-af14-b2598f0082f4\") "
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.046755 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb825f11-f941-48db-af14-b2598f0082f4-catalog-content\") pod \"eb825f11-f941-48db-af14-b2598f0082f4\" (UID: \"eb825f11-f941-48db-af14-b2598f0082f4\") "
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.046923 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8q2x4\" (UniqueName: \"kubernetes.io/projected/eb825f11-f941-48db-af14-b2598f0082f4-kube-api-access-8q2x4\") pod \"eb825f11-f941-48db-af14-b2598f0082f4\" (UID: \"eb825f11-f941-48db-af14-b2598f0082f4\") "
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.048220 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb825f11-f941-48db-af14-b2598f0082f4-utilities" (OuterVolumeSpecName: "utilities") pod "eb825f11-f941-48db-af14-b2598f0082f4" (UID: "eb825f11-f941-48db-af14-b2598f0082f4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.048597 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb825f11-f941-48db-af14-b2598f0082f4-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.059675 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb825f11-f941-48db-af14-b2598f0082f4-kube-api-access-8q2x4" (OuterVolumeSpecName: "kube-api-access-8q2x4") pod "eb825f11-f941-48db-af14-b2598f0082f4" (UID: "eb825f11-f941-48db-af14-b2598f0082f4"). InnerVolumeSpecName "kube-api-access-8q2x4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.118283 5014 generic.go:334] "Generic (PLEG): container finished" podID="083fdf75-9ab4-4b9f-9a29-e905459ba7cf" containerID="2a7ab6a355087b025ade6dcce73dbeaa829f71a68674eb3b12920294cdc1ea33" exitCode=0
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.118353 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p2fzw" event={"ID":"083fdf75-9ab4-4b9f-9a29-e905459ba7cf","Type":"ContainerDied","Data":"2a7ab6a355087b025ade6dcce73dbeaa829f71a68674eb3b12920294cdc1ea33"}
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.121401 5014 generic.go:334] "Generic (PLEG): container finished" podID="eb825f11-f941-48db-af14-b2598f0082f4" containerID="6eff39a5e02da55c53dfeacaa9fa197028b1724df296d1881113eeeef0ff642f" exitCode=0
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.121426 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdm24" event={"ID":"eb825f11-f941-48db-af14-b2598f0082f4","Type":"ContainerDied","Data":"6eff39a5e02da55c53dfeacaa9fa197028b1724df296d1881113eeeef0ff642f"}
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.121446 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pdm24" event={"ID":"eb825f11-f941-48db-af14-b2598f0082f4","Type":"ContainerDied","Data":"eddb29ce4f6bcb052a3050f19b07231e5c6d9ec4974e805a2393022d75b77950"}
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.121465 5014 scope.go:117] "RemoveContainer" containerID="6eff39a5e02da55c53dfeacaa9fa197028b1724df296d1881113eeeef0ff642f"
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.121587 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pdm24"
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.129563 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb825f11-f941-48db-af14-b2598f0082f4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eb825f11-f941-48db-af14-b2598f0082f4" (UID: "eb825f11-f941-48db-af14-b2598f0082f4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.150839 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8q2x4\" (UniqueName: \"kubernetes.io/projected/eb825f11-f941-48db-af14-b2598f0082f4-kube-api-access-8q2x4\") on node \"crc\" DevicePath \"\""
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.150892 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb825f11-f941-48db-af14-b2598f0082f4-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.157124 5014 scope.go:117] "RemoveContainer" containerID="72b59417df5450e110dd864cf097ded895a1d0b25d4ab92fcd4aaf0fdfd075da"
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.186414 5014 scope.go:117] "RemoveContainer" containerID="de5468a787adfe91291f0c8987bad47b369c2d502b68602b2fe7ce7f83d72937"
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.211999 5014 scope.go:117] "RemoveContainer" containerID="6eff39a5e02da55c53dfeacaa9fa197028b1724df296d1881113eeeef0ff642f"
Oct 06 21:55:11 crc kubenswrapper[5014]: E1006 21:55:11.212538 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6eff39a5e02da55c53dfeacaa9fa197028b1724df296d1881113eeeef0ff642f\": container with ID starting with 6eff39a5e02da55c53dfeacaa9fa197028b1724df296d1881113eeeef0ff642f not found: ID does not exist" containerID="6eff39a5e02da55c53dfeacaa9fa197028b1724df296d1881113eeeef0ff642f"
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.212599 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6eff39a5e02da55c53dfeacaa9fa197028b1724df296d1881113eeeef0ff642f"} err="failed to get container status \"6eff39a5e02da55c53dfeacaa9fa197028b1724df296d1881113eeeef0ff642f\": rpc error: code = NotFound desc = could not find container \"6eff39a5e02da55c53dfeacaa9fa197028b1724df296d1881113eeeef0ff642f\": container with ID starting with 6eff39a5e02da55c53dfeacaa9fa197028b1724df296d1881113eeeef0ff642f not found: ID does not exist"
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.212657 5014 scope.go:117] "RemoveContainer" containerID="72b59417df5450e110dd864cf097ded895a1d0b25d4ab92fcd4aaf0fdfd075da"
Oct 06 21:55:11 crc kubenswrapper[5014]: E1006 21:55:11.213006 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72b59417df5450e110dd864cf097ded895a1d0b25d4ab92fcd4aaf0fdfd075da\": container with ID starting with 72b59417df5450e110dd864cf097ded895a1d0b25d4ab92fcd4aaf0fdfd075da not found: ID does not exist" containerID="72b59417df5450e110dd864cf097ded895a1d0b25d4ab92fcd4aaf0fdfd075da"
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.213069 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72b59417df5450e110dd864cf097ded895a1d0b25d4ab92fcd4aaf0fdfd075da"} err="failed to get container status \"72b59417df5450e110dd864cf097ded895a1d0b25d4ab92fcd4aaf0fdfd075da\": rpc error: code = NotFound desc = could not find container \"72b59417df5450e110dd864cf097ded895a1d0b25d4ab92fcd4aaf0fdfd075da\": container with ID starting with 72b59417df5450e110dd864cf097ded895a1d0b25d4ab92fcd4aaf0fdfd075da not found: ID does not exist"
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.213100 5014 scope.go:117] "RemoveContainer" containerID="de5468a787adfe91291f0c8987bad47b369c2d502b68602b2fe7ce7f83d72937"
Oct 06 21:55:11 crc kubenswrapper[5014]: E1006 21:55:11.213856 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de5468a787adfe91291f0c8987bad47b369c2d502b68602b2fe7ce7f83d72937\": container with ID starting with de5468a787adfe91291f0c8987bad47b369c2d502b68602b2fe7ce7f83d72937 not found: ID does not exist" containerID="de5468a787adfe91291f0c8987bad47b369c2d502b68602b2fe7ce7f83d72937"
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.213916 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de5468a787adfe91291f0c8987bad47b369c2d502b68602b2fe7ce7f83d72937"} err="failed to get container status \"de5468a787adfe91291f0c8987bad47b369c2d502b68602b2fe7ce7f83d72937\": rpc error: code = NotFound desc = could not find container \"de5468a787adfe91291f0c8987bad47b369c2d502b68602b2fe7ce7f83d72937\": container with ID starting with de5468a787adfe91291f0c8987bad47b369c2d502b68602b2fe7ce7f83d72937 not found: ID does not exist"
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.445607 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pdm24"]
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.453061 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pdm24"]
Oct 06 21:55:11 crc kubenswrapper[5014]: I1006 21:55:11.500708 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb825f11-f941-48db-af14-b2598f0082f4" path="/var/lib/kubelet/pods/eb825f11-f941-48db-af14-b2598f0082f4/volumes"
Oct 06 21:55:12 crc kubenswrapper[5014]: I1006 21:55:12.139359 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p2fzw" event={"ID":"083fdf75-9ab4-4b9f-9a29-e905459ba7cf","Type":"ContainerStarted","Data":"b359ccd37a3bb4cd782d1999c10c2fa8adde6905cfc43e47b4bb751845f112ef"}
Oct 06 21:55:12 crc kubenswrapper[5014]: I1006 21:55:12.170958 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-p2fzw" podStartSLOduration=1.44562608 podStartE2EDuration="4.170934263s" podCreationTimestamp="2025-10-06 21:55:08 +0000 UTC" firstStartedPulling="2025-10-06 21:55:09.092501942 +0000 UTC m=+1454.385538686" lastFinishedPulling="2025-10-06 21:55:11.817810145 +0000 UTC m=+1457.110846869" observedRunningTime="2025-10-06 21:55:12.162996494 +0000 UTC m=+1457.456033238" watchObservedRunningTime="2025-10-06 21:55:12.170934263 +0000 UTC m=+1457.463971007"
Oct 06 21:55:18 crc kubenswrapper[5014]: I1006 21:55:18.561774 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:18 crc kubenswrapper[5014]: I1006 21:55:18.562520 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:18 crc kubenswrapper[5014]: I1006 21:55:18.633126 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:19 crc kubenswrapper[5014]: I1006 21:55:19.289275 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:19 crc kubenswrapper[5014]: I1006 21:55:19.368250 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-p2fzw"]
Oct 06 21:55:21 crc kubenswrapper[5014]: I1006 21:55:21.229040 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-p2fzw" podUID="083fdf75-9ab4-4b9f-9a29-e905459ba7cf" containerName="registry-server" containerID="cri-o://b359ccd37a3bb4cd782d1999c10c2fa8adde6905cfc43e47b4bb751845f112ef" gracePeriod=2
Oct 06 21:55:21 crc kubenswrapper[5014]: I1006 21:55:21.822787 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p2fzw"
Oct 06 21:55:21 crc kubenswrapper[5014]: I1006 21:55:21.909509 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctqmf\" (UniqueName: \"kubernetes.io/projected/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-kube-api-access-ctqmf\") pod \"083fdf75-9ab4-4b9f-9a29-e905459ba7cf\" (UID: \"083fdf75-9ab4-4b9f-9a29-e905459ba7cf\") "
Oct 06 21:55:21 crc kubenswrapper[5014]: I1006 21:55:21.909574 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-catalog-content\") pod \"083fdf75-9ab4-4b9f-9a29-e905459ba7cf\" (UID: \"083fdf75-9ab4-4b9f-9a29-e905459ba7cf\") "
Oct 06 21:55:21 crc kubenswrapper[5014]: I1006 21:55:21.909690 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-utilities\") pod \"083fdf75-9ab4-4b9f-9a29-e905459ba7cf\" (UID: \"083fdf75-9ab4-4b9f-9a29-e905459ba7cf\") "
Oct 06 21:55:21 crc kubenswrapper[5014]: I1006 21:55:21.910704 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-utilities" (OuterVolumeSpecName: "utilities") pod "083fdf75-9ab4-4b9f-9a29-e905459ba7cf" (UID: "083fdf75-9ab4-4b9f-9a29-e905459ba7cf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:55:21 crc kubenswrapper[5014]: I1006 21:55:21.916996 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-kube-api-access-ctqmf" (OuterVolumeSpecName: "kube-api-access-ctqmf") pod "083fdf75-9ab4-4b9f-9a29-e905459ba7cf" (UID: "083fdf75-9ab4-4b9f-9a29-e905459ba7cf"). InnerVolumeSpecName "kube-api-access-ctqmf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 21:55:21 crc kubenswrapper[5014]: I1006 21:55:21.976848 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "083fdf75-9ab4-4b9f-9a29-e905459ba7cf" (UID: "083fdf75-9ab4-4b9f-9a29-e905459ba7cf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.011467 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctqmf\" (UniqueName: \"kubernetes.io/projected/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-kube-api-access-ctqmf\") on node \"crc\" DevicePath \"\""
Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.011508 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.011516 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/083fdf75-9ab4-4b9f-9a29-e905459ba7cf-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.246353 5014 generic.go:334] "Generic (PLEG): container finished" podID="083fdf75-9ab4-4b9f-9a29-e905459ba7cf" containerID="b359ccd37a3bb4cd782d1999c10c2fa8adde6905cfc43e47b4bb751845f112ef" exitCode=0
Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.246440 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p2fzw" event={"ID":"083fdf75-9ab4-4b9f-9a29-e905459ba7cf","Type":"ContainerDied","Data":"b359ccd37a3bb4cd782d1999c10c2fa8adde6905cfc43e47b4bb751845f112ef"}
Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.246540 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p2fzw" event={"ID":"083fdf75-9ab4-4b9f-9a29-e905459ba7cf","Type":"ContainerDied","Data":"0f73c9652bcc7130669e7a6490fe60b035436b24feaf4fadd7766821022a3f43"}
Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.246573 5014 scope.go:117] "RemoveContainer" containerID="b359ccd37a3bb4cd782d1999c10c2fa8adde6905cfc43e47b4bb751845f112ef"
Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.246615 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-p2fzw" Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.279596 5014 scope.go:117] "RemoveContainer" containerID="2a7ab6a355087b025ade6dcce73dbeaa829f71a68674eb3b12920294cdc1ea33" Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.314924 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-p2fzw"] Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.326217 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-p2fzw"] Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.338956 5014 scope.go:117] "RemoveContainer" containerID="9cb8f8ad243dea60554e716e032a69bd9b38113a21baee8aab37200fe7afbf43" Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.381823 5014 scope.go:117] "RemoveContainer" containerID="b359ccd37a3bb4cd782d1999c10c2fa8adde6905cfc43e47b4bb751845f112ef" Oct 06 21:55:22 crc kubenswrapper[5014]: E1006 21:55:22.382424 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b359ccd37a3bb4cd782d1999c10c2fa8adde6905cfc43e47b4bb751845f112ef\": container with ID starting with b359ccd37a3bb4cd782d1999c10c2fa8adde6905cfc43e47b4bb751845f112ef not found: ID does not exist" containerID="b359ccd37a3bb4cd782d1999c10c2fa8adde6905cfc43e47b4bb751845f112ef" Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.382462 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b359ccd37a3bb4cd782d1999c10c2fa8adde6905cfc43e47b4bb751845f112ef"} err="failed to get container status \"b359ccd37a3bb4cd782d1999c10c2fa8adde6905cfc43e47b4bb751845f112ef\": rpc error: code = NotFound desc = could not find container \"b359ccd37a3bb4cd782d1999c10c2fa8adde6905cfc43e47b4bb751845f112ef\": container with ID starting with b359ccd37a3bb4cd782d1999c10c2fa8adde6905cfc43e47b4bb751845f112ef not found: ID does not exist" Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.382490 5014 scope.go:117] "RemoveContainer" containerID="2a7ab6a355087b025ade6dcce73dbeaa829f71a68674eb3b12920294cdc1ea33" Oct 06 21:55:22 crc kubenswrapper[5014]: E1006 21:55:22.383171 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a7ab6a355087b025ade6dcce73dbeaa829f71a68674eb3b12920294cdc1ea33\": container with ID starting with 2a7ab6a355087b025ade6dcce73dbeaa829f71a68674eb3b12920294cdc1ea33 not found: ID does not exist" containerID="2a7ab6a355087b025ade6dcce73dbeaa829f71a68674eb3b12920294cdc1ea33" Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.383222 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a7ab6a355087b025ade6dcce73dbeaa829f71a68674eb3b12920294cdc1ea33"} err="failed to get container status \"2a7ab6a355087b025ade6dcce73dbeaa829f71a68674eb3b12920294cdc1ea33\": rpc error: code = NotFound desc = could not find container \"2a7ab6a355087b025ade6dcce73dbeaa829f71a68674eb3b12920294cdc1ea33\": container with ID starting with 2a7ab6a355087b025ade6dcce73dbeaa829f71a68674eb3b12920294cdc1ea33 not found: ID does not exist" Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.383257 5014 scope.go:117] "RemoveContainer" containerID="9cb8f8ad243dea60554e716e032a69bd9b38113a21baee8aab37200fe7afbf43" Oct 06 21:55:22 crc kubenswrapper[5014]: E1006 21:55:22.383766 5014 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"9cb8f8ad243dea60554e716e032a69bd9b38113a21baee8aab37200fe7afbf43\": container with ID starting with 9cb8f8ad243dea60554e716e032a69bd9b38113a21baee8aab37200fe7afbf43 not found: ID does not exist" containerID="9cb8f8ad243dea60554e716e032a69bd9b38113a21baee8aab37200fe7afbf43" Oct 06 21:55:22 crc kubenswrapper[5014]: I1006 21:55:22.383834 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cb8f8ad243dea60554e716e032a69bd9b38113a21baee8aab37200fe7afbf43"} err="failed to get container status \"9cb8f8ad243dea60554e716e032a69bd9b38113a21baee8aab37200fe7afbf43\": rpc error: code = NotFound desc = could not find container \"9cb8f8ad243dea60554e716e032a69bd9b38113a21baee8aab37200fe7afbf43\": container with ID starting with 9cb8f8ad243dea60554e716e032a69bd9b38113a21baee8aab37200fe7afbf43 not found: ID does not exist" Oct 06 21:55:23 crc kubenswrapper[5014]: I1006 21:55:23.496319 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="083fdf75-9ab4-4b9f-9a29-e905459ba7cf" path="/var/lib/kubelet/pods/083fdf75-9ab4-4b9f-9a29-e905459ba7cf/volumes" Oct 06 21:55:59 crc kubenswrapper[5014]: I1006 21:55:59.927735 5014 scope.go:117] "RemoveContainer" containerID="ca142d670e607e0c0275fd475dcb9a118d4430b891c9a73969f6dff49fe7e95f" Oct 06 21:55:59 crc kubenswrapper[5014]: I1006 21:55:59.978757 5014 scope.go:117] "RemoveContainer" containerID="366d9c7d12dc53287d03434a10b7448a9c6d02142fd4ec78034d67e5b49d8e4b" Oct 06 21:56:00 crc kubenswrapper[5014]: I1006 21:56:00.013025 5014 scope.go:117] "RemoveContainer" containerID="17ada03c06ef0ed42fb4509093ae61bf8a5112d17d914c8bebab906bb79f4014" Oct 06 21:56:00 crc kubenswrapper[5014]: I1006 21:56:00.065722 5014 scope.go:117] "RemoveContainer" containerID="4cf6f0f2d03df260425ec2e087b93c533a13a88a7decf7ca78408daf3686e46c" Oct 06 21:56:00 crc kubenswrapper[5014]: I1006 21:56:00.128680 5014 scope.go:117] "RemoveContainer" containerID="f8c2374c05a9acca51f59d8cc1f112466396ec752bdd6d59ce4f4833fadceac7" Oct 06 21:56:00 crc kubenswrapper[5014]: I1006 21:56:00.167779 5014 scope.go:117] "RemoveContainer" containerID="d027fc574ed3e1780f86823b64d54c340ae29116db94cb57a95a5b184ff616b8" Oct 06 21:56:00 crc kubenswrapper[5014]: I1006 21:56:00.222065 5014 scope.go:117] "RemoveContainer" containerID="d77fd7ac2112ded27147e4409627c93dcae3392a0e112526dd74d8acaacc61c5" Oct 06 21:57:00 crc kubenswrapper[5014]: I1006 21:57:00.449534 5014 scope.go:117] "RemoveContainer" containerID="c724c6629b1de99ef2ecd3c56620586eab0b64f00960198734d644642c218918" Oct 06 21:57:00 crc kubenswrapper[5014]: I1006 21:57:00.494411 5014 scope.go:117] "RemoveContainer" containerID="099e875c4a6824b8758649c5a1258b7544aa15f8c03b6558b91615a7b48a8016" Oct 06 21:57:00 crc kubenswrapper[5014]: I1006 21:57:00.563165 5014 scope.go:117] "RemoveContainer" containerID="27e08919fc63e9cbfab920a4f7742075cce87a6cd89ec57c9bd8e23ee7770c82" Oct 06 21:57:00 crc kubenswrapper[5014]: I1006 21:57:00.586314 5014 scope.go:117] "RemoveContainer" containerID="160fc036b53fde699bc900f96f91e07f236ff55c5d39308a474b175d76ca1bff" Oct 06 21:57:00 crc kubenswrapper[5014]: I1006 21:57:00.626247 5014 scope.go:117] "RemoveContainer" containerID="f275100bad062a3e3dac1f99ba18a85645c9e3a83e238cb17a68f9c0a6030257" Oct 06 21:57:00 crc kubenswrapper[5014]: I1006 21:57:00.657448 5014 scope.go:117] "RemoveContainer" containerID="1f2fab62bca88836e6746c95b9eff65328fee516558f263d1cb4288d17877aac" Oct 06 21:57:00 crc 
kubenswrapper[5014]: I1006 21:57:00.692250 5014 scope.go:117] "RemoveContainer" containerID="cade81ead119d9b5f5ca886d347c02f3c9e6e5e57f2785ee7fe756b12f2fc74f" Oct 06 21:57:21 crc kubenswrapper[5014]: I1006 21:57:21.735002 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 21:57:21 crc kubenswrapper[5014]: I1006 21:57:21.735541 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.668269 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kgpkk"] Oct 06 21:57:45 crc kubenswrapper[5014]: E1006 21:57:45.669216 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="083fdf75-9ab4-4b9f-9a29-e905459ba7cf" containerName="extract-content" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.669231 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="083fdf75-9ab4-4b9f-9a29-e905459ba7cf" containerName="extract-content" Oct 06 21:57:45 crc kubenswrapper[5014]: E1006 21:57:45.669246 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="083fdf75-9ab4-4b9f-9a29-e905459ba7cf" containerName="registry-server" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.669253 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="083fdf75-9ab4-4b9f-9a29-e905459ba7cf" containerName="registry-server" Oct 06 21:57:45 crc kubenswrapper[5014]: E1006 21:57:45.669275 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="083fdf75-9ab4-4b9f-9a29-e905459ba7cf" containerName="extract-utilities" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.669283 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="083fdf75-9ab4-4b9f-9a29-e905459ba7cf" containerName="extract-utilities" Oct 06 21:57:45 crc kubenswrapper[5014]: E1006 21:57:45.669298 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb825f11-f941-48db-af14-b2598f0082f4" containerName="extract-content" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.669306 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb825f11-f941-48db-af14-b2598f0082f4" containerName="extract-content" Oct 06 21:57:45 crc kubenswrapper[5014]: E1006 21:57:45.669318 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dd18544-8a0e-4220-80bf-0f38cb7ee5a0" containerName="registry-server" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.669326 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dd18544-8a0e-4220-80bf-0f38cb7ee5a0" containerName="registry-server" Oct 06 21:57:45 crc kubenswrapper[5014]: E1006 21:57:45.669343 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb825f11-f941-48db-af14-b2598f0082f4" containerName="extract-utilities" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.669350 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb825f11-f941-48db-af14-b2598f0082f4" containerName="extract-utilities" Oct 06 21:57:45 crc kubenswrapper[5014]: E1006 21:57:45.669367 5014 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="eb825f11-f941-48db-af14-b2598f0082f4" containerName="registry-server" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.669375 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb825f11-f941-48db-af14-b2598f0082f4" containerName="registry-server" Oct 06 21:57:45 crc kubenswrapper[5014]: E1006 21:57:45.669397 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dd18544-8a0e-4220-80bf-0f38cb7ee5a0" containerName="extract-content" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.669405 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dd18544-8a0e-4220-80bf-0f38cb7ee5a0" containerName="extract-content" Oct 06 21:57:45 crc kubenswrapper[5014]: E1006 21:57:45.669417 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dd18544-8a0e-4220-80bf-0f38cb7ee5a0" containerName="extract-utilities" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.669425 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dd18544-8a0e-4220-80bf-0f38cb7ee5a0" containerName="extract-utilities" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.669585 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="8dd18544-8a0e-4220-80bf-0f38cb7ee5a0" containerName="registry-server" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.669609 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="083fdf75-9ab4-4b9f-9a29-e905459ba7cf" containerName="registry-server" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.669646 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb825f11-f941-48db-af14-b2598f0082f4" containerName="registry-server" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.670960 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.691494 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kgpkk"] Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.755878 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44a26db-3445-4786-8850-2ab3a5139ee2-catalog-content\") pod \"redhat-operators-kgpkk\" (UID: \"e44a26db-3445-4786-8850-2ab3a5139ee2\") " pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.755971 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkszn\" (UniqueName: \"kubernetes.io/projected/e44a26db-3445-4786-8850-2ab3a5139ee2-kube-api-access-wkszn\") pod \"redhat-operators-kgpkk\" (UID: \"e44a26db-3445-4786-8850-2ab3a5139ee2\") " pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.756036 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44a26db-3445-4786-8850-2ab3a5139ee2-utilities\") pod \"redhat-operators-kgpkk\" (UID: \"e44a26db-3445-4786-8850-2ab3a5139ee2\") " pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.857350 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkszn\" (UniqueName: \"kubernetes.io/projected/e44a26db-3445-4786-8850-2ab3a5139ee2-kube-api-access-wkszn\") pod \"redhat-operators-kgpkk\" (UID: \"e44a26db-3445-4786-8850-2ab3a5139ee2\") " pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.857454 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44a26db-3445-4786-8850-2ab3a5139ee2-utilities\") pod \"redhat-operators-kgpkk\" (UID: \"e44a26db-3445-4786-8850-2ab3a5139ee2\") " pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.857493 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44a26db-3445-4786-8850-2ab3a5139ee2-catalog-content\") pod \"redhat-operators-kgpkk\" (UID: \"e44a26db-3445-4786-8850-2ab3a5139ee2\") " pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.858068 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44a26db-3445-4786-8850-2ab3a5139ee2-catalog-content\") pod \"redhat-operators-kgpkk\" (UID: \"e44a26db-3445-4786-8850-2ab3a5139ee2\") " pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.858811 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44a26db-3445-4786-8850-2ab3a5139ee2-utilities\") pod \"redhat-operators-kgpkk\" (UID: \"e44a26db-3445-4786-8850-2ab3a5139ee2\") " pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:57:45 crc kubenswrapper[5014]: I1006 21:57:45.890823 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-wkszn\" (UniqueName: \"kubernetes.io/projected/e44a26db-3445-4786-8850-2ab3a5139ee2-kube-api-access-wkszn\") pod \"redhat-operators-kgpkk\" (UID: \"e44a26db-3445-4786-8850-2ab3a5139ee2\") " pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:57:46 crc kubenswrapper[5014]: I1006 21:57:46.025276 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:57:46 crc kubenswrapper[5014]: I1006 21:57:46.454942 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kgpkk"] Oct 06 21:57:46 crc kubenswrapper[5014]: I1006 21:57:46.751386 5014 generic.go:334] "Generic (PLEG): container finished" podID="e44a26db-3445-4786-8850-2ab3a5139ee2" containerID="b529d4e20d1238c5c31dd96c650c947dbc1a490f138c087e683fd4609b200730" exitCode=0 Oct 06 21:57:46 crc kubenswrapper[5014]: I1006 21:57:46.751553 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kgpkk" event={"ID":"e44a26db-3445-4786-8850-2ab3a5139ee2","Type":"ContainerDied","Data":"b529d4e20d1238c5c31dd96c650c947dbc1a490f138c087e683fd4609b200730"} Oct 06 21:57:46 crc kubenswrapper[5014]: I1006 21:57:46.751792 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kgpkk" event={"ID":"e44a26db-3445-4786-8850-2ab3a5139ee2","Type":"ContainerStarted","Data":"5a5ece1091b5e65cf341ffc539c1f7fe7a555924feaf72ea49a9757b637887b9"} Oct 06 21:57:47 crc kubenswrapper[5014]: I1006 21:57:47.758767 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kgpkk" event={"ID":"e44a26db-3445-4786-8850-2ab3a5139ee2","Type":"ContainerStarted","Data":"528ab39d65f9a36e2ca36d31ecda000bd4df56d4dfd79c0ee5acedadecb1dd4f"} Oct 06 21:57:48 crc kubenswrapper[5014]: I1006 21:57:48.771466 5014 generic.go:334] "Generic (PLEG): container finished" podID="e44a26db-3445-4786-8850-2ab3a5139ee2" containerID="528ab39d65f9a36e2ca36d31ecda000bd4df56d4dfd79c0ee5acedadecb1dd4f" exitCode=0 Oct 06 21:57:48 crc kubenswrapper[5014]: I1006 21:57:48.771551 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kgpkk" event={"ID":"e44a26db-3445-4786-8850-2ab3a5139ee2","Type":"ContainerDied","Data":"528ab39d65f9a36e2ca36d31ecda000bd4df56d4dfd79c0ee5acedadecb1dd4f"} Oct 06 21:57:49 crc kubenswrapper[5014]: I1006 21:57:49.786456 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kgpkk" event={"ID":"e44a26db-3445-4786-8850-2ab3a5139ee2","Type":"ContainerStarted","Data":"20690a954fd589df9874d15c2da765938da8b6473dbffb867a047cafe6cd2a80"} Oct 06 21:57:49 crc kubenswrapper[5014]: I1006 21:57:49.828558 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-kgpkk" podStartSLOduration=2.142506451 podStartE2EDuration="4.82852999s" podCreationTimestamp="2025-10-06 21:57:45 +0000 UTC" firstStartedPulling="2025-10-06 21:57:46.75295526 +0000 UTC m=+1612.045991994" lastFinishedPulling="2025-10-06 21:57:49.438978769 +0000 UTC m=+1614.732015533" observedRunningTime="2025-10-06 21:57:49.816901152 +0000 UTC m=+1615.109937916" watchObservedRunningTime="2025-10-06 21:57:49.82852999 +0000 UTC m=+1615.121566754" Oct 06 21:57:51 crc kubenswrapper[5014]: I1006 21:57:51.735344 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 21:57:51 crc kubenswrapper[5014]: I1006 21:57:51.735718 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 21:57:56 crc kubenswrapper[5014]: I1006 21:57:56.025675 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:57:56 crc kubenswrapper[5014]: I1006 21:57:56.025948 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:57:56 crc kubenswrapper[5014]: I1006 21:57:56.086804 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:57:56 crc kubenswrapper[5014]: I1006 21:57:56.969287 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:57:57 crc kubenswrapper[5014]: I1006 21:57:57.037876 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kgpkk"] Oct 06 21:57:58 crc kubenswrapper[5014]: I1006 21:57:58.915855 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-kgpkk" podUID="e44a26db-3445-4786-8850-2ab3a5139ee2" containerName="registry-server" containerID="cri-o://20690a954fd589df9874d15c2da765938da8b6473dbffb867a047cafe6cd2a80" gracePeriod=2 Oct 06 21:57:59 crc kubenswrapper[5014]: I1006 21:57:59.936754 5014 generic.go:334] "Generic (PLEG): container finished" podID="e44a26db-3445-4786-8850-2ab3a5139ee2" containerID="20690a954fd589df9874d15c2da765938da8b6473dbffb867a047cafe6cd2a80" exitCode=0 Oct 06 21:57:59 crc kubenswrapper[5014]: I1006 21:57:59.937408 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kgpkk" event={"ID":"e44a26db-3445-4786-8850-2ab3a5139ee2","Type":"ContainerDied","Data":"20690a954fd589df9874d15c2da765938da8b6473dbffb867a047cafe6cd2a80"} Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.539116 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.698431 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44a26db-3445-4786-8850-2ab3a5139ee2-catalog-content\") pod \"e44a26db-3445-4786-8850-2ab3a5139ee2\" (UID: \"e44a26db-3445-4786-8850-2ab3a5139ee2\") " Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.698828 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44a26db-3445-4786-8850-2ab3a5139ee2-utilities\") pod \"e44a26db-3445-4786-8850-2ab3a5139ee2\" (UID: \"e44a26db-3445-4786-8850-2ab3a5139ee2\") " Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.698917 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wkszn\" (UniqueName: \"kubernetes.io/projected/e44a26db-3445-4786-8850-2ab3a5139ee2-kube-api-access-wkszn\") pod \"e44a26db-3445-4786-8850-2ab3a5139ee2\" (UID: \"e44a26db-3445-4786-8850-2ab3a5139ee2\") " Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.699643 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e44a26db-3445-4786-8850-2ab3a5139ee2-utilities" (OuterVolumeSpecName: "utilities") pod "e44a26db-3445-4786-8850-2ab3a5139ee2" (UID: "e44a26db-3445-4786-8850-2ab3a5139ee2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.704741 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e44a26db-3445-4786-8850-2ab3a5139ee2-kube-api-access-wkszn" (OuterVolumeSpecName: "kube-api-access-wkszn") pod "e44a26db-3445-4786-8850-2ab3a5139ee2" (UID: "e44a26db-3445-4786-8850-2ab3a5139ee2"). InnerVolumeSpecName "kube-api-access-wkszn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.793575 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e44a26db-3445-4786-8850-2ab3a5139ee2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e44a26db-3445-4786-8850-2ab3a5139ee2" (UID: "e44a26db-3445-4786-8850-2ab3a5139ee2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.800950 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wkszn\" (UniqueName: \"kubernetes.io/projected/e44a26db-3445-4786-8850-2ab3a5139ee2-kube-api-access-wkszn\") on node \"crc\" DevicePath \"\"" Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.800997 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e44a26db-3445-4786-8850-2ab3a5139ee2-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.801017 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e44a26db-3445-4786-8850-2ab3a5139ee2-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.878822 5014 scope.go:117] "RemoveContainer" containerID="2c01c113ed0c4688ed51a7ea51e16ece0de9c6f03bc70c0f7a9b90f07fcbcac6" Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.908401 5014 scope.go:117] "RemoveContainer" containerID="a6b7e7b2fd6d1afbe67e44ec51642bedfaa579e1f6be92a11c811ecf98a44b29" Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.936718 5014 scope.go:117] "RemoveContainer" containerID="ca2e042630ca0fb08befe1a8638b57e312fd37f6bd86aa9c08da4b6cb0a9ea5a" Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.953477 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kgpkk" event={"ID":"e44a26db-3445-4786-8850-2ab3a5139ee2","Type":"ContainerDied","Data":"5a5ece1091b5e65cf341ffc539c1f7fe7a555924feaf72ea49a9757b637887b9"} Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.953558 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kgpkk" Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.953598 5014 scope.go:117] "RemoveContainer" containerID="20690a954fd589df9874d15c2da765938da8b6473dbffb867a047cafe6cd2a80" Oct 06 21:58:00 crc kubenswrapper[5014]: I1006 21:58:00.968243 5014 scope.go:117] "RemoveContainer" containerID="e0108697276545e2fd0df481de9931a00ca8d8ecc01d4199b56e5ce3ba02591e" Oct 06 21:58:01 crc kubenswrapper[5014]: I1006 21:58:01.009304 5014 scope.go:117] "RemoveContainer" containerID="528ab39d65f9a36e2ca36d31ecda000bd4df56d4dfd79c0ee5acedadecb1dd4f" Oct 06 21:58:01 crc kubenswrapper[5014]: I1006 21:58:01.029870 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kgpkk"] Oct 06 21:58:01 crc kubenswrapper[5014]: I1006 21:58:01.035242 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-kgpkk"] Oct 06 21:58:01 crc kubenswrapper[5014]: I1006 21:58:01.059700 5014 scope.go:117] "RemoveContainer" containerID="7943ac158fc9c82b6786736f3eafcf3a5d3ac1281259d04c7563c98199d84d1d" Oct 06 21:58:01 crc kubenswrapper[5014]: I1006 21:58:01.082016 5014 scope.go:117] "RemoveContainer" containerID="b529d4e20d1238c5c31dd96c650c947dbc1a490f138c087e683fd4609b200730" Oct 06 21:58:01 crc kubenswrapper[5014]: I1006 21:58:01.505136 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e44a26db-3445-4786-8850-2ab3a5139ee2" path="/var/lib/kubelet/pods/e44a26db-3445-4786-8850-2ab3a5139ee2/volumes" Oct 06 21:58:21 crc kubenswrapper[5014]: I1006 21:58:21.735193 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 21:58:21 crc kubenswrapper[5014]: I1006 21:58:21.735787 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 21:58:21 crc kubenswrapper[5014]: I1006 21:58:21.735843 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 21:58:21 crc kubenswrapper[5014]: I1006 21:58:21.736485 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 06 21:58:21 crc kubenswrapper[5014]: I1006 21:58:21.736556 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" gracePeriod=600 Oct 06 21:58:21 crc kubenswrapper[5014]: E1006 21:58:21.871253 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 21:58:22 crc kubenswrapper[5014]: I1006 21:58:22.171144 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" exitCode=0 Oct 06 21:58:22 crc kubenswrapper[5014]: I1006 21:58:22.171226 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa"} Oct 06 21:58:22 crc kubenswrapper[5014]: I1006 21:58:22.171715 5014 scope.go:117] "RemoveContainer" containerID="cbdf259ebb7c14fd9e76d97e1e3dfaef9e8de4a47de87f8694acba8d0b7b3bc4" Oct 06 21:58:22 crc kubenswrapper[5014]: I1006 21:58:22.172247 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 21:58:22 crc kubenswrapper[5014]: E1006 21:58:22.172854 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 21:58:33 crc kubenswrapper[5014]: I1006 21:58:33.484086 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 21:58:33 crc kubenswrapper[5014]: E1006 21:58:33.485234 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 21:58:47 crc kubenswrapper[5014]: I1006 21:58:47.484949 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 21:58:47 crc kubenswrapper[5014]: E1006 21:58:47.485932 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 21:59:00 crc kubenswrapper[5014]: I1006 21:59:00.484948 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 21:59:00 crc kubenswrapper[5014]: E1006 21:59:00.485802 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 21:59:01 crc kubenswrapper[5014]: I1006 21:59:01.207335 5014 scope.go:117] "RemoveContainer" containerID="7e310f1e94472fbc3a79b40ea198846da76836cf2835e1612a43c414454648ad" Oct 06 21:59:15 crc kubenswrapper[5014]: I1006 21:59:15.493866 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 21:59:15 crc kubenswrapper[5014]: E1006 21:59:15.497266 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 21:59:29 crc kubenswrapper[5014]: I1006 21:59:29.485096 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 21:59:29 crc kubenswrapper[5014]: E1006 21:59:29.486113 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 21:59:40 crc kubenswrapper[5014]: I1006 21:59:40.484774 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 21:59:40 crc kubenswrapper[5014]: E1006 21:59:40.485669 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 21:59:55 crc kubenswrapper[5014]: I1006 21:59:55.502613 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 21:59:55 crc kubenswrapper[5014]: E1006 21:59:55.505060 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.188408 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc"] Oct 06 22:00:00 crc kubenswrapper[5014]: E1006 22:00:00.191329 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e44a26db-3445-4786-8850-2ab3a5139ee2" containerName="extract-content" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.191563 5014 
state_mem.go:107] "Deleted CPUSet assignment" podUID="e44a26db-3445-4786-8850-2ab3a5139ee2" containerName="extract-content" Oct 06 22:00:00 crc kubenswrapper[5014]: E1006 22:00:00.191807 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e44a26db-3445-4786-8850-2ab3a5139ee2" containerName="registry-server" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.191961 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="e44a26db-3445-4786-8850-2ab3a5139ee2" containerName="registry-server" Oct 06 22:00:00 crc kubenswrapper[5014]: E1006 22:00:00.192142 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e44a26db-3445-4786-8850-2ab3a5139ee2" containerName="extract-utilities" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.192293 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="e44a26db-3445-4786-8850-2ab3a5139ee2" containerName="extract-utilities" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.192708 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="e44a26db-3445-4786-8850-2ab3a5139ee2" containerName="registry-server" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.193762 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.196565 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc"] Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.218432 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.219248 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.220074 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bd2b1a65-b02a-423f-b86f-e581dbdb0367-config-volume\") pod \"collect-profiles-29329800-96tfc\" (UID: \"bd2b1a65-b02a-423f-b86f-e581dbdb0367\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.220226 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9rg4\" (UniqueName: \"kubernetes.io/projected/bd2b1a65-b02a-423f-b86f-e581dbdb0367-kube-api-access-q9rg4\") pod \"collect-profiles-29329800-96tfc\" (UID: \"bd2b1a65-b02a-423f-b86f-e581dbdb0367\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.220510 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bd2b1a65-b02a-423f-b86f-e581dbdb0367-secret-volume\") pod \"collect-profiles-29329800-96tfc\" (UID: \"bd2b1a65-b02a-423f-b86f-e581dbdb0367\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.322147 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9rg4\" (UniqueName: \"kubernetes.io/projected/bd2b1a65-b02a-423f-b86f-e581dbdb0367-kube-api-access-q9rg4\") pod 
\"collect-profiles-29329800-96tfc\" (UID: \"bd2b1a65-b02a-423f-b86f-e581dbdb0367\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.322347 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bd2b1a65-b02a-423f-b86f-e581dbdb0367-secret-volume\") pod \"collect-profiles-29329800-96tfc\" (UID: \"bd2b1a65-b02a-423f-b86f-e581dbdb0367\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.322439 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bd2b1a65-b02a-423f-b86f-e581dbdb0367-config-volume\") pod \"collect-profiles-29329800-96tfc\" (UID: \"bd2b1a65-b02a-423f-b86f-e581dbdb0367\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.323665 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bd2b1a65-b02a-423f-b86f-e581dbdb0367-config-volume\") pod \"collect-profiles-29329800-96tfc\" (UID: \"bd2b1a65-b02a-423f-b86f-e581dbdb0367\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.331367 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bd2b1a65-b02a-423f-b86f-e581dbdb0367-secret-volume\") pod \"collect-profiles-29329800-96tfc\" (UID: \"bd2b1a65-b02a-423f-b86f-e581dbdb0367\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.347110 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9rg4\" (UniqueName: \"kubernetes.io/projected/bd2b1a65-b02a-423f-b86f-e581dbdb0367-kube-api-access-q9rg4\") pod \"collect-profiles-29329800-96tfc\" (UID: \"bd2b1a65-b02a-423f-b86f-e581dbdb0367\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.542926 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" Oct 06 22:00:00 crc kubenswrapper[5014]: I1006 22:00:00.821495 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc"] Oct 06 22:00:01 crc kubenswrapper[5014]: I1006 22:00:01.124945 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" event={"ID":"bd2b1a65-b02a-423f-b86f-e581dbdb0367","Type":"ContainerStarted","Data":"0e5ab00455d56f476cd0f04f27664546c93179954790f83d19c18bc3546f1d07"} Oct 06 22:00:01 crc kubenswrapper[5014]: I1006 22:00:01.125028 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" event={"ID":"bd2b1a65-b02a-423f-b86f-e581dbdb0367","Type":"ContainerStarted","Data":"93b716f2485d7f4ecb30342f9c2a323aa31c7efa0d45c14ada8c950e9b2d80f7"} Oct 06 22:00:01 crc kubenswrapper[5014]: I1006 22:00:01.149379 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" podStartSLOduration=1.149343587 podStartE2EDuration="1.149343587s" podCreationTimestamp="2025-10-06 22:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:00:01.147918523 +0000 UTC m=+1746.440955277" watchObservedRunningTime="2025-10-06 22:00:01.149343587 +0000 UTC m=+1746.442380361" Oct 06 22:00:01 crc kubenswrapper[5014]: I1006 22:00:01.316263 5014 scope.go:117] "RemoveContainer" containerID="3e401ff685888d65e22cfe526043fdc38f33382719d48441e4570e416cd7a132" Oct 06 22:00:02 crc kubenswrapper[5014]: I1006 22:00:02.137892 5014 generic.go:334] "Generic (PLEG): container finished" podID="bd2b1a65-b02a-423f-b86f-e581dbdb0367" containerID="0e5ab00455d56f476cd0f04f27664546c93179954790f83d19c18bc3546f1d07" exitCode=0 Oct 06 22:00:02 crc kubenswrapper[5014]: I1006 22:00:02.137998 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" event={"ID":"bd2b1a65-b02a-423f-b86f-e581dbdb0367","Type":"ContainerDied","Data":"0e5ab00455d56f476cd0f04f27664546c93179954790f83d19c18bc3546f1d07"} Oct 06 22:00:03 crc kubenswrapper[5014]: I1006 22:00:03.512846 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" Oct 06 22:00:03 crc kubenswrapper[5014]: I1006 22:00:03.575952 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bd2b1a65-b02a-423f-b86f-e581dbdb0367-config-volume\") pod \"bd2b1a65-b02a-423f-b86f-e581dbdb0367\" (UID: \"bd2b1a65-b02a-423f-b86f-e581dbdb0367\") " Oct 06 22:00:03 crc kubenswrapper[5014]: I1006 22:00:03.576085 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bd2b1a65-b02a-423f-b86f-e581dbdb0367-secret-volume\") pod \"bd2b1a65-b02a-423f-b86f-e581dbdb0367\" (UID: \"bd2b1a65-b02a-423f-b86f-e581dbdb0367\") " Oct 06 22:00:03 crc kubenswrapper[5014]: I1006 22:00:03.576127 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9rg4\" (UniqueName: \"kubernetes.io/projected/bd2b1a65-b02a-423f-b86f-e581dbdb0367-kube-api-access-q9rg4\") pod \"bd2b1a65-b02a-423f-b86f-e581dbdb0367\" (UID: \"bd2b1a65-b02a-423f-b86f-e581dbdb0367\") " Oct 06 22:00:03 crc kubenswrapper[5014]: I1006 22:00:03.576884 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd2b1a65-b02a-423f-b86f-e581dbdb0367-config-volume" (OuterVolumeSpecName: "config-volume") pod "bd2b1a65-b02a-423f-b86f-e581dbdb0367" (UID: "bd2b1a65-b02a-423f-b86f-e581dbdb0367"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:00:03 crc kubenswrapper[5014]: I1006 22:00:03.582545 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd2b1a65-b02a-423f-b86f-e581dbdb0367-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "bd2b1a65-b02a-423f-b86f-e581dbdb0367" (UID: "bd2b1a65-b02a-423f-b86f-e581dbdb0367"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 22:00:03 crc kubenswrapper[5014]: I1006 22:00:03.583139 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd2b1a65-b02a-423f-b86f-e581dbdb0367-kube-api-access-q9rg4" (OuterVolumeSpecName: "kube-api-access-q9rg4") pod "bd2b1a65-b02a-423f-b86f-e581dbdb0367" (UID: "bd2b1a65-b02a-423f-b86f-e581dbdb0367"). InnerVolumeSpecName "kube-api-access-q9rg4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:00:03 crc kubenswrapper[5014]: I1006 22:00:03.678122 5014 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bd2b1a65-b02a-423f-b86f-e581dbdb0367-config-volume\") on node \"crc\" DevicePath \"\"" Oct 06 22:00:03 crc kubenswrapper[5014]: I1006 22:00:03.678151 5014 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bd2b1a65-b02a-423f-b86f-e581dbdb0367-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 06 22:00:03 crc kubenswrapper[5014]: I1006 22:00:03.678165 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9rg4\" (UniqueName: \"kubernetes.io/projected/bd2b1a65-b02a-423f-b86f-e581dbdb0367-kube-api-access-q9rg4\") on node \"crc\" DevicePath \"\"" Oct 06 22:00:04 crc kubenswrapper[5014]: I1006 22:00:04.161370 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" event={"ID":"bd2b1a65-b02a-423f-b86f-e581dbdb0367","Type":"ContainerDied","Data":"93b716f2485d7f4ecb30342f9c2a323aa31c7efa0d45c14ada8c950e9b2d80f7"} Oct 06 22:00:04 crc kubenswrapper[5014]: I1006 22:00:04.161474 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="93b716f2485d7f4ecb30342f9c2a323aa31c7efa0d45c14ada8c950e9b2d80f7" Oct 06 22:00:04 crc kubenswrapper[5014]: I1006 22:00:04.161728 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc" Oct 06 22:00:09 crc kubenswrapper[5014]: I1006 22:00:09.484892 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 22:00:09 crc kubenswrapper[5014]: E1006 22:00:09.485806 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:00:20 crc kubenswrapper[5014]: I1006 22:00:20.484413 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 22:00:20 crc kubenswrapper[5014]: E1006 22:00:20.485497 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:00:35 crc kubenswrapper[5014]: I1006 22:00:35.513197 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 22:00:35 crc kubenswrapper[5014]: E1006 22:00:35.514667 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:00:48 crc kubenswrapper[5014]: I1006 22:00:48.484876 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 22:00:48 crc kubenswrapper[5014]: E1006 22:00:48.486059 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:01:01 crc kubenswrapper[5014]: I1006 22:01:01.485254 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 22:01:01 crc kubenswrapper[5014]: E1006 22:01:01.486417 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:01:15 crc kubenswrapper[5014]: I1006 22:01:15.491732 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 22:01:15 crc kubenswrapper[5014]: E1006 22:01:15.492945 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:01:29 crc kubenswrapper[5014]: I1006 22:01:29.486678 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 22:01:29 crc kubenswrapper[5014]: E1006 22:01:29.487557 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:01:41 crc kubenswrapper[5014]: I1006 22:01:41.487584 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 22:01:41 crc kubenswrapper[5014]: E1006 22:01:41.488536 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:01:52 crc kubenswrapper[5014]: I1006 22:01:52.484947 5014 
scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 22:01:52 crc kubenswrapper[5014]: E1006 22:01:52.485969 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:02:04 crc kubenswrapper[5014]: I1006 22:02:04.485566 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 22:02:04 crc kubenswrapper[5014]: E1006 22:02:04.486944 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:02:17 crc kubenswrapper[5014]: I1006 22:02:17.485687 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 22:02:17 crc kubenswrapper[5014]: E1006 22:02:17.486682 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:02:30 crc kubenswrapper[5014]: I1006 22:02:30.484305 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 22:02:30 crc kubenswrapper[5014]: E1006 22:02:30.485086 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:02:41 crc kubenswrapper[5014]: I1006 22:02:41.484834 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 22:02:41 crc kubenswrapper[5014]: E1006 22:02:41.485877 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:02:55 crc kubenswrapper[5014]: I1006 22:02:55.491765 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 22:02:55 crc kubenswrapper[5014]: E1006 22:02:55.493440 5014 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:03:08 crc kubenswrapper[5014]: I1006 22:03:08.485399 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa"
Oct 06 22:03:08 crc kubenswrapper[5014]: E1006 22:03:08.486517 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:03:19 crc kubenswrapper[5014]: I1006 22:03:19.484839 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa"
Oct 06 22:03:19 crc kubenswrapper[5014]: E1006 22:03:19.486183 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:03:33 crc kubenswrapper[5014]: I1006 22:03:33.485812 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa"
Oct 06 22:03:34 crc kubenswrapper[5014]: I1006 22:03:34.188491 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"c1106c1c6304da4faf8da0a255de2a35cc578d87197c127c1e338edccadc53df"}
Oct 06 22:05:17 crc kubenswrapper[5014]: I1006 22:05:17.577525 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hwnnp"]
Oct 06 22:05:17 crc kubenswrapper[5014]: E1006 22:05:17.580453 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd2b1a65-b02a-423f-b86f-e581dbdb0367" containerName="collect-profiles"
Oct 06 22:05:17 crc kubenswrapper[5014]: I1006 22:05:17.580474 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd2b1a65-b02a-423f-b86f-e581dbdb0367" containerName="collect-profiles"
Oct 06 22:05:17 crc kubenswrapper[5014]: I1006 22:05:17.580836 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd2b1a65-b02a-423f-b86f-e581dbdb0367" containerName="collect-profiles"
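
The repeated "RemoveContainer" / "Error syncing pod, skipping" pairs above are kubelet's restart back-off at work: every sync attempt for machine-config-daemon is rejected while the CrashLoopBackOff window is open, the window has already grown to its 5m0s cap, and the container is only started again at 22:03:34 once the back-off expires. A minimal sketch of that gate in Go (illustrative only, not kubelet source; the 10s base delay and the doubling rule are assumptions based on Kubernetes' documented restart-back-off behavior):

// Illustrative restart back-off gate: each crash doubles the delay, capped
// at 5m (the "back-off 5m0s" visible in the log above).
package main

import (
	"fmt"
	"time"
)

const (
	baseDelay = 10 * time.Second // assumed base delay
	maxDelay  = 5 * time.Minute  // cap seen in the log
)

type backoff struct {
	delay       time.Duration // current back-off window
	lastFailure time.Time     // when the container last crashed
}

// tryStart reports whether a restart is allowed at time now; while the
// window is open it returns a CrashLoopBackOff-style rejection.
func (b *backoff) tryStart(now time.Time) error {
	if !b.lastFailure.IsZero() && now.Before(b.lastFailure.Add(b.delay)) {
		return fmt.Errorf("CrashLoopBackOff: back-off %s restarting failed container", b.delay)
	}
	return nil
}

// recordFailure doubles the window after another crash, up to the cap.
func (b *backoff) recordFailure(now time.Time) {
	if b.delay == 0 {
		b.delay = baseDelay
	} else {
		b.delay *= 2
		if b.delay > maxDelay {
			b.delay = maxDelay
		}
	}
	b.lastFailure = now
}

func main() {
	var b backoff
	now := time.Now()
	b.recordFailure(now) // first crash opens a 10s window
	b.recordFailure(now) // second crash doubles it to 20s
	if err := b.tryStart(now.Add(12 * time.Second)); err != nil {
		fmt.Println("Error syncing pod, skipping:", err) // still inside the window
	}
	if err := b.tryStart(now.Add(21 * time.Second)); err == nil {
		fmt.Println("back-off expired, StartContainer allowed")
	}
}

In the log the sync loop retries roughly every 12s, so at the 5m cap most attempts are rejected; only the 22:03:33 attempt falls outside the window and leads to the ContainerStarted event.
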
Oct 06 22:05:17 crc kubenswrapper[5014]: I1006 22:05:17.598461 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hwnnp"
Oct 06 22:05:17 crc kubenswrapper[5014]: I1006 22:05:17.608077 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hwnnp"]
Oct 06 22:05:17 crc kubenswrapper[5014]: I1006 22:05:17.792900 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c065a627-8c11-42f9-8d0f-59b00be16888-catalog-content\") pod \"redhat-marketplace-hwnnp\" (UID: \"c065a627-8c11-42f9-8d0f-59b00be16888\") " pod="openshift-marketplace/redhat-marketplace-hwnnp"
Oct 06 22:05:17 crc kubenswrapper[5014]: I1006 22:05:17.792958 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4n7hf\" (UniqueName: \"kubernetes.io/projected/c065a627-8c11-42f9-8d0f-59b00be16888-kube-api-access-4n7hf\") pod \"redhat-marketplace-hwnnp\" (UID: \"c065a627-8c11-42f9-8d0f-59b00be16888\") " pod="openshift-marketplace/redhat-marketplace-hwnnp"
Oct 06 22:05:17 crc kubenswrapper[5014]: I1006 22:05:17.793206 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c065a627-8c11-42f9-8d0f-59b00be16888-utilities\") pod \"redhat-marketplace-hwnnp\" (UID: \"c065a627-8c11-42f9-8d0f-59b00be16888\") " pod="openshift-marketplace/redhat-marketplace-hwnnp"
Oct 06 22:05:17 crc kubenswrapper[5014]: I1006 22:05:17.894017 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c065a627-8c11-42f9-8d0f-59b00be16888-catalog-content\") pod \"redhat-marketplace-hwnnp\" (UID: \"c065a627-8c11-42f9-8d0f-59b00be16888\") " pod="openshift-marketplace/redhat-marketplace-hwnnp"
Oct 06 22:05:17 crc kubenswrapper[5014]: I1006 22:05:17.894106 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4n7hf\" (UniqueName: \"kubernetes.io/projected/c065a627-8c11-42f9-8d0f-59b00be16888-kube-api-access-4n7hf\") pod \"redhat-marketplace-hwnnp\" (UID: \"c065a627-8c11-42f9-8d0f-59b00be16888\") " pod="openshift-marketplace/redhat-marketplace-hwnnp"
Oct 06 22:05:17 crc kubenswrapper[5014]: I1006 22:05:17.894258 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c065a627-8c11-42f9-8d0f-59b00be16888-utilities\") pod \"redhat-marketplace-hwnnp\" (UID: \"c065a627-8c11-42f9-8d0f-59b00be16888\") " pod="openshift-marketplace/redhat-marketplace-hwnnp"
Oct 06 22:05:17 crc kubenswrapper[5014]: I1006 22:05:17.895901 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c065a627-8c11-42f9-8d0f-59b00be16888-utilities\") pod \"redhat-marketplace-hwnnp\" (UID: \"c065a627-8c11-42f9-8d0f-59b00be16888\") " pod="openshift-marketplace/redhat-marketplace-hwnnp"
Oct 06 22:05:17 crc kubenswrapper[5014]: I1006 22:05:17.895948 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c065a627-8c11-42f9-8d0f-59b00be16888-catalog-content\") pod \"redhat-marketplace-hwnnp\" (UID: \"c065a627-8c11-42f9-8d0f-59b00be16888\") " pod="openshift-marketplace/redhat-marketplace-hwnnp"
Oct 06 22:05:17 crc kubenswrapper[5014]: I1006 22:05:17.921145 5014 operation_generator.go:637] "MountVolume.SetUp
succeeded for volume \"kube-api-access-4n7hf\" (UniqueName: \"kubernetes.io/projected/c065a627-8c11-42f9-8d0f-59b00be16888-kube-api-access-4n7hf\") pod \"redhat-marketplace-hwnnp\" (UID: \"c065a627-8c11-42f9-8d0f-59b00be16888\") " pod="openshift-marketplace/redhat-marketplace-hwnnp" Oct 06 22:05:17 crc kubenswrapper[5014]: I1006 22:05:17.932396 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hwnnp" Oct 06 22:05:18 crc kubenswrapper[5014]: I1006 22:05:18.397163 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hwnnp"] Oct 06 22:05:18 crc kubenswrapper[5014]: W1006 22:05:18.410005 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc065a627_8c11_42f9_8d0f_59b00be16888.slice/crio-82e07bf8ed9efcb328d7a5108831d2d9524047f60c39e54d528efc4f524d6ad9 WatchSource:0}: Error finding container 82e07bf8ed9efcb328d7a5108831d2d9524047f60c39e54d528efc4f524d6ad9: Status 404 returned error can't find the container with id 82e07bf8ed9efcb328d7a5108831d2d9524047f60c39e54d528efc4f524d6ad9 Oct 06 22:05:19 crc kubenswrapper[5014]: I1006 22:05:19.184467 5014 generic.go:334] "Generic (PLEG): container finished" podID="c065a627-8c11-42f9-8d0f-59b00be16888" containerID="096dd7e8e945237651f0cf827fa435a9302e4a8b41f55ba6cc90a2107cc5861a" exitCode=0 Oct 06 22:05:19 crc kubenswrapper[5014]: I1006 22:05:19.184698 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwnnp" event={"ID":"c065a627-8c11-42f9-8d0f-59b00be16888","Type":"ContainerDied","Data":"096dd7e8e945237651f0cf827fa435a9302e4a8b41f55ba6cc90a2107cc5861a"} Oct 06 22:05:19 crc kubenswrapper[5014]: I1006 22:05:19.184986 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwnnp" event={"ID":"c065a627-8c11-42f9-8d0f-59b00be16888","Type":"ContainerStarted","Data":"82e07bf8ed9efcb328d7a5108831d2d9524047f60c39e54d528efc4f524d6ad9"} Oct 06 22:05:19 crc kubenswrapper[5014]: I1006 22:05:19.188106 5014 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 06 22:05:21 crc kubenswrapper[5014]: I1006 22:05:21.212777 5014 generic.go:334] "Generic (PLEG): container finished" podID="c065a627-8c11-42f9-8d0f-59b00be16888" containerID="0ec2d32dfe5126fc52d94dc09bc4c8fe2208a08fff7e58539429dca79fdff17b" exitCode=0 Oct 06 22:05:21 crc kubenswrapper[5014]: I1006 22:05:21.212850 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwnnp" event={"ID":"c065a627-8c11-42f9-8d0f-59b00be16888","Type":"ContainerDied","Data":"0ec2d32dfe5126fc52d94dc09bc4c8fe2208a08fff7e58539429dca79fdff17b"} Oct 06 22:05:22 crc kubenswrapper[5014]: I1006 22:05:22.228721 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwnnp" event={"ID":"c065a627-8c11-42f9-8d0f-59b00be16888","Type":"ContainerStarted","Data":"8fcc00b2ab847f1851c2b270360e489cbef290ea48a6c395dece2fef1e014496"} Oct 06 22:05:22 crc kubenswrapper[5014]: I1006 22:05:22.259534 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hwnnp" podStartSLOduration=2.789861757 podStartE2EDuration="5.259502611s" podCreationTimestamp="2025-10-06 22:05:17 +0000 UTC" firstStartedPulling="2025-10-06 22:05:19.187688775 +0000 UTC m=+2064.480725549" 
lastFinishedPulling="2025-10-06 22:05:21.657329629 +0000 UTC m=+2066.950366403" observedRunningTime="2025-10-06 22:05:22.25345811 +0000 UTC m=+2067.546494884" watchObservedRunningTime="2025-10-06 22:05:22.259502611 +0000 UTC m=+2067.552539375"
Oct 06 22:05:27 crc kubenswrapper[5014]: I1006 22:05:27.932551 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hwnnp"
Oct 06 22:05:27 crc kubenswrapper[5014]: I1006 22:05:27.933329 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hwnnp"
Oct 06 22:05:27 crc kubenswrapper[5014]: I1006 22:05:27.996975 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hwnnp"
Oct 06 22:05:28 crc kubenswrapper[5014]: I1006 22:05:28.355239 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hwnnp"
Oct 06 22:05:28 crc kubenswrapper[5014]: I1006 22:05:28.430032 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hwnnp"]
Oct 06 22:05:30 crc kubenswrapper[5014]: I1006 22:05:30.309305 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hwnnp" podUID="c065a627-8c11-42f9-8d0f-59b00be16888" containerName="registry-server" containerID="cri-o://8fcc00b2ab847f1851c2b270360e489cbef290ea48a6c395dece2fef1e014496" gracePeriod=2
Oct 06 22:05:30 crc kubenswrapper[5014]: I1006 22:05:30.831899 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hwnnp"
Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.020211 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4n7hf\" (UniqueName: \"kubernetes.io/projected/c065a627-8c11-42f9-8d0f-59b00be16888-kube-api-access-4n7hf\") pod \"c065a627-8c11-42f9-8d0f-59b00be16888\" (UID: \"c065a627-8c11-42f9-8d0f-59b00be16888\") "
Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.020686 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c065a627-8c11-42f9-8d0f-59b00be16888-catalog-content\") pod \"c065a627-8c11-42f9-8d0f-59b00be16888\" (UID: \"c065a627-8c11-42f9-8d0f-59b00be16888\") "
Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.025854 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c065a627-8c11-42f9-8d0f-59b00be16888-utilities\") pod \"c065a627-8c11-42f9-8d0f-59b00be16888\" (UID: \"c065a627-8c11-42f9-8d0f-59b00be16888\") "
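
The DELETE above kicks off an ordered teardown: the registry-server container is killed within its grace period (gracePeriod=2 for these catalog pods), the volume manager then runs UnmountVolume.TearDown for each volume, and only afterwards are the volumes reported detached so the orphaned pod directory can be removed. A compressed sketch of that ordering (illustrative Go with assumed structure, not kubelet's actual types):

// Illustrative teardown ordering: stop the container within its grace
// period, then unmount volumes, then mark them detached.
package main

import (
	"context"
	"fmt"
	"time"
)

// stopContainer waits up to gracePeriod for the container to exit; in the
// real flow a force-kill follows if the deadline passes.
func stopContainer(id string, gracePeriod time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), gracePeriod)
	defer cancel()
	exited := make(chan struct{})
	go func() { // pretend the process exits almost immediately
		time.Sleep(10 * time.Millisecond)
		close(exited)
	}()
	select {
	case <-exited:
		fmt.Printf("container %s exited within grace period\n", id)
	case <-ctx.Done():
		fmt.Printf("grace period elapsed, force-killing %s\n", id)
	}
}

func main() {
	// 1. Stop the container first (gracePeriod=2 matches the log above).
	stopContainer("registry-server", 2*time.Second)

	// 2. Unmount only after the container is gone, mirroring the
	//    UnmountVolume.TearDown entries.
	for _, v := range []string{"utilities", "kube-api-access", "catalog-content"} {
		fmt.Printf("UnmountVolume.TearDown succeeded for %q\n", v)
	}

	// 3. Finally the reconciler reports "Volume detached" and the pod's
	//    volumes dir can be cleaned up.
	for _, v := range []string{"catalog-content", "utilities", "kube-api-access"} {
		fmt.Printf("Volume detached for %q\n", v)
	}
}
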
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.034935 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c065a627-8c11-42f9-8d0f-59b00be16888-kube-api-access-4n7hf" (OuterVolumeSpecName: "kube-api-access-4n7hf") pod "c065a627-8c11-42f9-8d0f-59b00be16888" (UID: "c065a627-8c11-42f9-8d0f-59b00be16888"). InnerVolumeSpecName "kube-api-access-4n7hf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.049407 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c065a627-8c11-42f9-8d0f-59b00be16888-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c065a627-8c11-42f9-8d0f-59b00be16888" (UID: "c065a627-8c11-42f9-8d0f-59b00be16888"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.127745 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c065a627-8c11-42f9-8d0f-59b00be16888-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.127796 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c065a627-8c11-42f9-8d0f-59b00be16888-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.127815 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4n7hf\" (UniqueName: \"kubernetes.io/projected/c065a627-8c11-42f9-8d0f-59b00be16888-kube-api-access-4n7hf\") on node \"crc\" DevicePath \"\"" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.322154 5014 generic.go:334] "Generic (PLEG): container finished" podID="c065a627-8c11-42f9-8d0f-59b00be16888" containerID="8fcc00b2ab847f1851c2b270360e489cbef290ea48a6c395dece2fef1e014496" exitCode=0 Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.322202 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwnnp" event={"ID":"c065a627-8c11-42f9-8d0f-59b00be16888","Type":"ContainerDied","Data":"8fcc00b2ab847f1851c2b270360e489cbef290ea48a6c395dece2fef1e014496"} Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.322230 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwnnp" event={"ID":"c065a627-8c11-42f9-8d0f-59b00be16888","Type":"ContainerDied","Data":"82e07bf8ed9efcb328d7a5108831d2d9524047f60c39e54d528efc4f524d6ad9"} Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.322250 5014 scope.go:117] "RemoveContainer" containerID="8fcc00b2ab847f1851c2b270360e489cbef290ea48a6c395dece2fef1e014496" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.322408 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hwnnp" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.366304 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hwnnp"] Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.366513 5014 scope.go:117] "RemoveContainer" containerID="0ec2d32dfe5126fc52d94dc09bc4c8fe2208a08fff7e58539429dca79fdff17b" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.374832 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hwnnp"] Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.398189 5014 scope.go:117] "RemoveContainer" containerID="096dd7e8e945237651f0cf827fa435a9302e4a8b41f55ba6cc90a2107cc5861a" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.447428 5014 scope.go:117] "RemoveContainer" containerID="8fcc00b2ab847f1851c2b270360e489cbef290ea48a6c395dece2fef1e014496" Oct 06 22:05:31 crc kubenswrapper[5014]: E1006 22:05:31.448056 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fcc00b2ab847f1851c2b270360e489cbef290ea48a6c395dece2fef1e014496\": container with ID starting with 8fcc00b2ab847f1851c2b270360e489cbef290ea48a6c395dece2fef1e014496 not found: ID does not exist" containerID="8fcc00b2ab847f1851c2b270360e489cbef290ea48a6c395dece2fef1e014496" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.448126 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fcc00b2ab847f1851c2b270360e489cbef290ea48a6c395dece2fef1e014496"} err="failed to get container status \"8fcc00b2ab847f1851c2b270360e489cbef290ea48a6c395dece2fef1e014496\": rpc error: code = NotFound desc = could not find container \"8fcc00b2ab847f1851c2b270360e489cbef290ea48a6c395dece2fef1e014496\": container with ID starting with 8fcc00b2ab847f1851c2b270360e489cbef290ea48a6c395dece2fef1e014496 not found: ID does not exist" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.448168 5014 scope.go:117] "RemoveContainer" containerID="0ec2d32dfe5126fc52d94dc09bc4c8fe2208a08fff7e58539429dca79fdff17b" Oct 06 22:05:31 crc kubenswrapper[5014]: E1006 22:05:31.448739 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ec2d32dfe5126fc52d94dc09bc4c8fe2208a08fff7e58539429dca79fdff17b\": container with ID starting with 0ec2d32dfe5126fc52d94dc09bc4c8fe2208a08fff7e58539429dca79fdff17b not found: ID does not exist" containerID="0ec2d32dfe5126fc52d94dc09bc4c8fe2208a08fff7e58539429dca79fdff17b" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.448805 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ec2d32dfe5126fc52d94dc09bc4c8fe2208a08fff7e58539429dca79fdff17b"} err="failed to get container status \"0ec2d32dfe5126fc52d94dc09bc4c8fe2208a08fff7e58539429dca79fdff17b\": rpc error: code = NotFound desc = could not find container \"0ec2d32dfe5126fc52d94dc09bc4c8fe2208a08fff7e58539429dca79fdff17b\": container with ID starting with 0ec2d32dfe5126fc52d94dc09bc4c8fe2208a08fff7e58539429dca79fdff17b not found: ID does not exist" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.448852 5014 scope.go:117] "RemoveContainer" containerID="096dd7e8e945237651f0cf827fa435a9302e4a8b41f55ba6cc90a2107cc5861a" Oct 06 22:05:31 crc kubenswrapper[5014]: E1006 22:05:31.449393 5014 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"096dd7e8e945237651f0cf827fa435a9302e4a8b41f55ba6cc90a2107cc5861a\": container with ID starting with 096dd7e8e945237651f0cf827fa435a9302e4a8b41f55ba6cc90a2107cc5861a not found: ID does not exist" containerID="096dd7e8e945237651f0cf827fa435a9302e4a8b41f55ba6cc90a2107cc5861a" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.449434 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"096dd7e8e945237651f0cf827fa435a9302e4a8b41f55ba6cc90a2107cc5861a"} err="failed to get container status \"096dd7e8e945237651f0cf827fa435a9302e4a8b41f55ba6cc90a2107cc5861a\": rpc error: code = NotFound desc = could not find container \"096dd7e8e945237651f0cf827fa435a9302e4a8b41f55ba6cc90a2107cc5861a\": container with ID starting with 096dd7e8e945237651f0cf827fa435a9302e4a8b41f55ba6cc90a2107cc5861a not found: ID does not exist" Oct 06 22:05:31 crc kubenswrapper[5014]: I1006 22:05:31.499818 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c065a627-8c11-42f9-8d0f-59b00be16888" path="/var/lib/kubelet/pods/c065a627-8c11-42f9-8d0f-59b00be16888/volumes" Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.459330 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-s9j6w"] Oct 06 22:05:42 crc kubenswrapper[5014]: E1006 22:05:42.460533 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c065a627-8c11-42f9-8d0f-59b00be16888" containerName="extract-utilities" Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.460554 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c065a627-8c11-42f9-8d0f-59b00be16888" containerName="extract-utilities" Oct 06 22:05:42 crc kubenswrapper[5014]: E1006 22:05:42.460590 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c065a627-8c11-42f9-8d0f-59b00be16888" containerName="registry-server" Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.460603 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c065a627-8c11-42f9-8d0f-59b00be16888" containerName="registry-server" Oct 06 22:05:42 crc kubenswrapper[5014]: E1006 22:05:42.460655 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c065a627-8c11-42f9-8d0f-59b00be16888" containerName="extract-content" Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.460669 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c065a627-8c11-42f9-8d0f-59b00be16888" containerName="extract-content" Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.460915 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c065a627-8c11-42f9-8d0f-59b00be16888" containerName="registry-server" Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.463385 5014 util.go:30] "No sandbox for pod can be found. 
Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.463385 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s9j6w"
Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.480038 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s9j6w"]
Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.606214 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/513fd530-f145-4691-8829-cd2b828884d7-catalog-content\") pod \"certified-operators-s9j6w\" (UID: \"513fd530-f145-4691-8829-cd2b828884d7\") " pod="openshift-marketplace/certified-operators-s9j6w"
Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.606301 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/513fd530-f145-4691-8829-cd2b828884d7-utilities\") pod \"certified-operators-s9j6w\" (UID: \"513fd530-f145-4691-8829-cd2b828884d7\") " pod="openshift-marketplace/certified-operators-s9j6w"
Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.606491 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bw6z9\" (UniqueName: \"kubernetes.io/projected/513fd530-f145-4691-8829-cd2b828884d7-kube-api-access-bw6z9\") pod \"certified-operators-s9j6w\" (UID: \"513fd530-f145-4691-8829-cd2b828884d7\") " pod="openshift-marketplace/certified-operators-s9j6w"
Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.708614 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/513fd530-f145-4691-8829-cd2b828884d7-catalog-content\") pod \"certified-operators-s9j6w\" (UID: \"513fd530-f145-4691-8829-cd2b828884d7\") " pod="openshift-marketplace/certified-operators-s9j6w"
Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.707775 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/513fd530-f145-4691-8829-cd2b828884d7-catalog-content\") pod \"certified-operators-s9j6w\" (UID: \"513fd530-f145-4691-8829-cd2b828884d7\") " pod="openshift-marketplace/certified-operators-s9j6w"
Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.710065 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/513fd530-f145-4691-8829-cd2b828884d7-utilities\") pod \"certified-operators-s9j6w\" (UID: \"513fd530-f145-4691-8829-cd2b828884d7\") " pod="openshift-marketplace/certified-operators-s9j6w"
Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.710729 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/513fd530-f145-4691-8829-cd2b828884d7-utilities\") pod \"certified-operators-s9j6w\" (UID: \"513fd530-f145-4691-8829-cd2b828884d7\") " pod="openshift-marketplace/certified-operators-s9j6w"
Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.711118 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bw6z9\" (UniqueName: \"kubernetes.io/projected/513fd530-f145-4691-8829-cd2b828884d7-kube-api-access-bw6z9\") pod \"certified-operators-s9j6w\" (UID: \"513fd530-f145-4691-8829-cd2b828884d7\") " pod="openshift-marketplace/certified-operators-s9j6w"
Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.739295 5014 operation_generator.go:637]
"MountVolume.SetUp succeeded for volume \"kube-api-access-bw6z9\" (UniqueName: \"kubernetes.io/projected/513fd530-f145-4691-8829-cd2b828884d7-kube-api-access-bw6z9\") pod \"certified-operators-s9j6w\" (UID: \"513fd530-f145-4691-8829-cd2b828884d7\") " pod="openshift-marketplace/certified-operators-s9j6w" Oct 06 22:05:42 crc kubenswrapper[5014]: I1006 22:05:42.806185 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s9j6w" Oct 06 22:05:43 crc kubenswrapper[5014]: I1006 22:05:43.364292 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s9j6w"] Oct 06 22:05:43 crc kubenswrapper[5014]: W1006 22:05:43.376362 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod513fd530_f145_4691_8829_cd2b828884d7.slice/crio-dc3c068120ed178db50175d61e41e5e0365fe17c0f1c0712d84adc9bb916fb76 WatchSource:0}: Error finding container dc3c068120ed178db50175d61e41e5e0365fe17c0f1c0712d84adc9bb916fb76: Status 404 returned error can't find the container with id dc3c068120ed178db50175d61e41e5e0365fe17c0f1c0712d84adc9bb916fb76 Oct 06 22:05:43 crc kubenswrapper[5014]: I1006 22:05:43.449155 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9j6w" event={"ID":"513fd530-f145-4691-8829-cd2b828884d7","Type":"ContainerStarted","Data":"dc3c068120ed178db50175d61e41e5e0365fe17c0f1c0712d84adc9bb916fb76"} Oct 06 22:05:44 crc kubenswrapper[5014]: I1006 22:05:44.460364 5014 generic.go:334] "Generic (PLEG): container finished" podID="513fd530-f145-4691-8829-cd2b828884d7" containerID="34472cdf63ebc43b8831d01be61f24519c3d36960f9051645f08a2a5934a5ebd" exitCode=0 Oct 06 22:05:44 crc kubenswrapper[5014]: I1006 22:05:44.460464 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9j6w" event={"ID":"513fd530-f145-4691-8829-cd2b828884d7","Type":"ContainerDied","Data":"34472cdf63ebc43b8831d01be61f24519c3d36960f9051645f08a2a5934a5ebd"} Oct 06 22:05:45 crc kubenswrapper[5014]: I1006 22:05:45.480386 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9j6w" event={"ID":"513fd530-f145-4691-8829-cd2b828884d7","Type":"ContainerStarted","Data":"8d712bf5df5c967426e5d0becb9df3d8152f45564dc7f0eaeb7474c613bbf56f"} Oct 06 22:05:46 crc kubenswrapper[5014]: I1006 22:05:46.490178 5014 generic.go:334] "Generic (PLEG): container finished" podID="513fd530-f145-4691-8829-cd2b828884d7" containerID="8d712bf5df5c967426e5d0becb9df3d8152f45564dc7f0eaeb7474c613bbf56f" exitCode=0 Oct 06 22:05:46 crc kubenswrapper[5014]: I1006 22:05:46.490283 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9j6w" event={"ID":"513fd530-f145-4691-8829-cd2b828884d7","Type":"ContainerDied","Data":"8d712bf5df5c967426e5d0becb9df3d8152f45564dc7f0eaeb7474c613bbf56f"} Oct 06 22:05:47 crc kubenswrapper[5014]: I1006 22:05:47.500034 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9j6w" event={"ID":"513fd530-f145-4691-8829-cd2b828884d7","Type":"ContainerStarted","Data":"32d8bece09bf1bde34c88a1f6b8a8585d32756c9df6be4bf7351640b8d2514fd"} Oct 06 22:05:47 crc kubenswrapper[5014]: I1006 22:05:47.523169 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-s9j6w" 
podStartSLOduration=3.022352313 podStartE2EDuration="5.523150226s" podCreationTimestamp="2025-10-06 22:05:42 +0000 UTC" firstStartedPulling="2025-10-06 22:05:44.464050779 +0000 UTC m=+2089.757087543" lastFinishedPulling="2025-10-06 22:05:46.964848712 +0000 UTC m=+2092.257885456" observedRunningTime="2025-10-06 22:05:47.517202869 +0000 UTC m=+2092.810239613" watchObservedRunningTime="2025-10-06 22:05:47.523150226 +0000 UTC m=+2092.816186980"
Oct 06 22:05:51 crc kubenswrapper[5014]: I1006 22:05:51.735794 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 22:05:51 crc kubenswrapper[5014]: I1006 22:05:51.736347 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 22:05:52 crc kubenswrapper[5014]: I1006 22:05:52.806489 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-s9j6w"
Oct 06 22:05:52 crc kubenswrapper[5014]: I1006 22:05:52.807029 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-s9j6w"
Oct 06 22:05:52 crc kubenswrapper[5014]: I1006 22:05:52.881804 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-s9j6w"
Oct 06 22:05:53 crc kubenswrapper[5014]: I1006 22:05:53.649906 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-s9j6w"
Oct 06 22:05:53 crc kubenswrapper[5014]: I1006 22:05:53.711598 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s9j6w"]
Oct 06 22:05:55 crc kubenswrapper[5014]: I1006 22:05:55.571898 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-s9j6w" podUID="513fd530-f145-4691-8829-cd2b828884d7" containerName="registry-server" containerID="cri-o://32d8bece09bf1bde34c88a1f6b8a8585d32756c9df6be4bf7351640b8d2514fd" gracePeriod=2
Oct 06 22:05:56 crc kubenswrapper[5014]: I1006 22:05:56.590189 5014 generic.go:334] "Generic (PLEG): container finished" podID="513fd530-f145-4691-8829-cd2b828884d7" containerID="32d8bece09bf1bde34c88a1f6b8a8585d32756c9df6be4bf7351640b8d2514fd" exitCode=0
Oct 06 22:05:56 crc kubenswrapper[5014]: I1006 22:05:56.590538 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9j6w" event={"ID":"513fd530-f145-4691-8829-cd2b828884d7","Type":"ContainerDied","Data":"32d8bece09bf1bde34c88a1f6b8a8585d32756c9df6be4bf7351640b8d2514fd"}
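
The pod_startup_latency_tracker entries above fit a simple relationship: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that end-to-end time minus the image-pull window (lastFinishedPulling minus firstStartedPulling). For redhat-marketplace-hwnnp earlier in this log the numbers reconcile exactly: 5.259502611s - (22:05:21.657329629 - 22:05:19.187688775) = 2.789861757s; for certified-operators-s9j6w they agree to within tens of nanoseconds, plausibly monotonic-clock rounding. A small Go check of that arithmetic (the formula is inferred from these log values, not quoted from kubelet source):

// Recomputes the hwnnp latency figures from the timestamps logged above.
package main

import (
	"fmt"
	"time"
)

func mustParse(s string) time.Time {
	t, err := time.Parse(time.RFC3339Nano, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	// Values from the redhat-marketplace-hwnnp entry earlier in the log.
	created := mustParse("2025-10-06T22:05:17Z")
	firstStartedPulling := mustParse("2025-10-06T22:05:19.187688775Z")
	lastFinishedPulling := mustParse("2025-10-06T22:05:21.657329629Z")
	observedRunning := mustParse("2025-10-06T22:05:22.259502611Z")

	e2e := observedRunning.Sub(created)                  // 5.259502611s
	pull := lastFinishedPulling.Sub(firstStartedPulling) // 2.469640854s
	slo := e2e - pull                                    // 2.789861757s

	fmt.Printf("podStartE2EDuration=%s podStartSLOduration=%s\n", e2e, slo)
}
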
Oct 06 22:05:56 crc kubenswrapper[5014]: I1006 22:05:56.696558 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s9j6w"
Oct 06 22:05:56 crc kubenswrapper[5014]: I1006 22:05:56.842710 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/513fd530-f145-4691-8829-cd2b828884d7-utilities\") pod \"513fd530-f145-4691-8829-cd2b828884d7\" (UID: \"513fd530-f145-4691-8829-cd2b828884d7\") "
Oct 06 22:05:56 crc kubenswrapper[5014]: I1006 22:05:56.842873 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/513fd530-f145-4691-8829-cd2b828884d7-catalog-content\") pod \"513fd530-f145-4691-8829-cd2b828884d7\" (UID: \"513fd530-f145-4691-8829-cd2b828884d7\") "
Oct 06 22:05:56 crc kubenswrapper[5014]: I1006 22:05:56.842999 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bw6z9\" (UniqueName: \"kubernetes.io/projected/513fd530-f145-4691-8829-cd2b828884d7-kube-api-access-bw6z9\") pod \"513fd530-f145-4691-8829-cd2b828884d7\" (UID: \"513fd530-f145-4691-8829-cd2b828884d7\") "
Oct 06 22:05:56 crc kubenswrapper[5014]: I1006 22:05:56.845556 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/513fd530-f145-4691-8829-cd2b828884d7-utilities" (OuterVolumeSpecName: "utilities") pod "513fd530-f145-4691-8829-cd2b828884d7" (UID: "513fd530-f145-4691-8829-cd2b828884d7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 22:05:56 crc kubenswrapper[5014]: I1006 22:05:56.854954 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/513fd530-f145-4691-8829-cd2b828884d7-kube-api-access-bw6z9" (OuterVolumeSpecName: "kube-api-access-bw6z9") pod "513fd530-f145-4691-8829-cd2b828884d7" (UID: "513fd530-f145-4691-8829-cd2b828884d7"). InnerVolumeSpecName "kube-api-access-bw6z9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:05:56 crc kubenswrapper[5014]: I1006 22:05:56.944608 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bw6z9\" (UniqueName: \"kubernetes.io/projected/513fd530-f145-4691-8829-cd2b828884d7-kube-api-access-bw6z9\") on node \"crc\" DevicePath \"\""
Oct 06 22:05:56 crc kubenswrapper[5014]: I1006 22:05:56.944656 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/513fd530-f145-4691-8829-cd2b828884d7-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 22:05:57 crc kubenswrapper[5014]: I1006 22:05:57.602441 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9j6w" event={"ID":"513fd530-f145-4691-8829-cd2b828884d7","Type":"ContainerDied","Data":"dc3c068120ed178db50175d61e41e5e0365fe17c0f1c0712d84adc9bb916fb76"}
Oct 06 22:05:57 crc kubenswrapper[5014]: I1006 22:05:57.602519 5014 scope.go:117] "RemoveContainer" containerID="32d8bece09bf1bde34c88a1f6b8a8585d32756c9df6be4bf7351640b8d2514fd"
Oct 06 22:05:57 crc kubenswrapper[5014]: I1006 22:05:57.604228 5014 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/certified-operators-s9j6w" Oct 06 22:05:57 crc kubenswrapper[5014]: I1006 22:05:57.631220 5014 scope.go:117] "RemoveContainer" containerID="8d712bf5df5c967426e5d0becb9df3d8152f45564dc7f0eaeb7474c613bbf56f" Oct 06 22:05:57 crc kubenswrapper[5014]: I1006 22:05:57.663026 5014 scope.go:117] "RemoveContainer" containerID="34472cdf63ebc43b8831d01be61f24519c3d36960f9051645f08a2a5934a5ebd" Oct 06 22:05:57 crc kubenswrapper[5014]: I1006 22:05:57.866433 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/513fd530-f145-4691-8829-cd2b828884d7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "513fd530-f145-4691-8829-cd2b828884d7" (UID: "513fd530-f145-4691-8829-cd2b828884d7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:05:57 crc kubenswrapper[5014]: I1006 22:05:57.956391 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s9j6w"] Oct 06 22:05:57 crc kubenswrapper[5014]: I1006 22:05:57.958669 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/513fd530-f145-4691-8829-cd2b828884d7-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 22:05:57 crc kubenswrapper[5014]: I1006 22:05:57.961835 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-s9j6w"] Oct 06 22:05:59 crc kubenswrapper[5014]: I1006 22:05:59.498755 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="513fd530-f145-4691-8829-cd2b828884d7" path="/var/lib/kubelet/pods/513fd530-f145-4691-8829-cd2b828884d7/volumes" Oct 06 22:06:21 crc kubenswrapper[5014]: I1006 22:06:21.735747 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:06:21 crc kubenswrapper[5014]: I1006 22:06:21.736506 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:06:51 crc kubenswrapper[5014]: I1006 22:06:51.735395 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:06:51 crc kubenswrapper[5014]: I1006 22:06:51.736977 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:06:51 crc kubenswrapper[5014]: I1006 22:06:51.737045 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 22:06:51 crc kubenswrapper[5014]: I1006 22:06:51.737814 5014 kuberuntime_manager.go:1027] 
"Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c1106c1c6304da4faf8da0a255de2a35cc578d87197c127c1e338edccadc53df"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 06 22:06:51 crc kubenswrapper[5014]: I1006 22:06:51.737889 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://c1106c1c6304da4faf8da0a255de2a35cc578d87197c127c1e338edccadc53df" gracePeriod=600 Oct 06 22:06:52 crc kubenswrapper[5014]: I1006 22:06:52.138091 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="c1106c1c6304da4faf8da0a255de2a35cc578d87197c127c1e338edccadc53df" exitCode=0 Oct 06 22:06:52 crc kubenswrapper[5014]: I1006 22:06:52.138153 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"c1106c1c6304da4faf8da0a255de2a35cc578d87197c127c1e338edccadc53df"} Oct 06 22:06:52 crc kubenswrapper[5014]: I1006 22:06:52.138210 5014 scope.go:117] "RemoveContainer" containerID="c6473d0481655aa48c69f3c5e4b236996d12c2a42948b9ffc1b8eac9e93f12aa" Oct 06 22:06:53 crc kubenswrapper[5014]: I1006 22:06:53.153742 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"} Oct 06 22:09:21 crc kubenswrapper[5014]: I1006 22:09:21.735001 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:09:21 crc kubenswrapper[5014]: I1006 22:09:21.735859 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:09:51 crc kubenswrapper[5014]: I1006 22:09:51.735279 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:09:51 crc kubenswrapper[5014]: I1006 22:09:51.736231 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:09:53 crc kubenswrapper[5014]: I1006 22:09:53.864482 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2ks56"] Oct 06 22:09:53 crc kubenswrapper[5014]: E1006 
Oct 06 22:09:53 crc kubenswrapper[5014]: E1006 22:09:53.865455 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="513fd530-f145-4691-8829-cd2b828884d7" containerName="extract-utilities"
Oct 06 22:09:53 crc kubenswrapper[5014]: I1006 22:09:53.865480 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="513fd530-f145-4691-8829-cd2b828884d7" containerName="extract-utilities"
Oct 06 22:09:53 crc kubenswrapper[5014]: E1006 22:09:53.865519 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="513fd530-f145-4691-8829-cd2b828884d7" containerName="registry-server"
Oct 06 22:09:53 crc kubenswrapper[5014]: I1006 22:09:53.865536 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="513fd530-f145-4691-8829-cd2b828884d7" containerName="registry-server"
Oct 06 22:09:53 crc kubenswrapper[5014]: E1006 22:09:53.865584 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="513fd530-f145-4691-8829-cd2b828884d7" containerName="extract-content"
Oct 06 22:09:53 crc kubenswrapper[5014]: I1006 22:09:53.865596 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="513fd530-f145-4691-8829-cd2b828884d7" containerName="extract-content"
Oct 06 22:09:53 crc kubenswrapper[5014]: I1006 22:09:53.865893 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="513fd530-f145-4691-8829-cd2b828884d7" containerName="registry-server"
Oct 06 22:09:53 crc kubenswrapper[5014]: I1006 22:09:53.868235 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2ks56"
Oct 06 22:09:53 crc kubenswrapper[5014]: I1006 22:09:53.878305 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2ks56"]
Oct 06 22:09:53 crc kubenswrapper[5014]: I1006 22:09:53.933819 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-utilities\") pod \"redhat-operators-2ks56\" (UID: \"b0ac1329-4252-466d-bbe1-71a42dd0c5ac\") " pod="openshift-marketplace/redhat-operators-2ks56"
Oct 06 22:09:53 crc kubenswrapper[5014]: I1006 22:09:53.933890 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-catalog-content\") pod \"redhat-operators-2ks56\" (UID: \"b0ac1329-4252-466d-bbe1-71a42dd0c5ac\") " pod="openshift-marketplace/redhat-operators-2ks56"
Oct 06 22:09:53 crc kubenswrapper[5014]: I1006 22:09:53.933946 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slqrf\" (UniqueName: \"kubernetes.io/projected/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-kube-api-access-slqrf\") pod \"redhat-operators-2ks56\" (UID: \"b0ac1329-4252-466d-bbe1-71a42dd0c5ac\") " pod="openshift-marketplace/redhat-operators-2ks56"
Oct 06 22:09:54 crc kubenswrapper[5014]: I1006 22:09:54.035142 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-catalog-content\") pod \"redhat-operators-2ks56\" (UID: \"b0ac1329-4252-466d-bbe1-71a42dd0c5ac\") " pod="openshift-marketplace/redhat-operators-2ks56"
Oct 06 22:09:54 crc kubenswrapper[5014]: I1006 22:09:54.035286 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slqrf\" (UniqueName:
\"kubernetes.io/projected/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-kube-api-access-slqrf\") pod \"redhat-operators-2ks56\" (UID: \"b0ac1329-4252-466d-bbe1-71a42dd0c5ac\") " pod="openshift-marketplace/redhat-operators-2ks56" Oct 06 22:09:54 crc kubenswrapper[5014]: I1006 22:09:54.035385 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-utilities\") pod \"redhat-operators-2ks56\" (UID: \"b0ac1329-4252-466d-bbe1-71a42dd0c5ac\") " pod="openshift-marketplace/redhat-operators-2ks56" Oct 06 22:09:54 crc kubenswrapper[5014]: I1006 22:09:54.035952 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-catalog-content\") pod \"redhat-operators-2ks56\" (UID: \"b0ac1329-4252-466d-bbe1-71a42dd0c5ac\") " pod="openshift-marketplace/redhat-operators-2ks56" Oct 06 22:09:54 crc kubenswrapper[5014]: I1006 22:09:54.036159 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-utilities\") pod \"redhat-operators-2ks56\" (UID: \"b0ac1329-4252-466d-bbe1-71a42dd0c5ac\") " pod="openshift-marketplace/redhat-operators-2ks56" Oct 06 22:09:54 crc kubenswrapper[5014]: I1006 22:09:54.070214 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slqrf\" (UniqueName: \"kubernetes.io/projected/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-kube-api-access-slqrf\") pod \"redhat-operators-2ks56\" (UID: \"b0ac1329-4252-466d-bbe1-71a42dd0c5ac\") " pod="openshift-marketplace/redhat-operators-2ks56" Oct 06 22:09:54 crc kubenswrapper[5014]: I1006 22:09:54.194269 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2ks56"
Oct 06 22:09:54 crc kubenswrapper[5014]: I1006 22:09:54.647639 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2ks56"]
Oct 06 22:09:54 crc kubenswrapper[5014]: I1006 22:09:54.890802 5014 generic.go:334] "Generic (PLEG): container finished" podID="b0ac1329-4252-466d-bbe1-71a42dd0c5ac" containerID="921dc1552886addb8fcff5f3e4b8d259cae5c4a7ef648078ad682838f450de15" exitCode=0
Oct 06 22:09:54 crc kubenswrapper[5014]: I1006 22:09:54.890855 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2ks56" event={"ID":"b0ac1329-4252-466d-bbe1-71a42dd0c5ac","Type":"ContainerDied","Data":"921dc1552886addb8fcff5f3e4b8d259cae5c4a7ef648078ad682838f450de15"}
Oct 06 22:09:54 crc kubenswrapper[5014]: I1006 22:09:54.890888 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2ks56" event={"ID":"b0ac1329-4252-466d-bbe1-71a42dd0c5ac","Type":"ContainerStarted","Data":"6a1afedf0cc42f8ac28817502704b341600baf491dab54c2b5e1259dd24b822f"}
Oct 06 22:09:55 crc kubenswrapper[5014]: I1006 22:09:55.901834 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2ks56" event={"ID":"b0ac1329-4252-466d-bbe1-71a42dd0c5ac","Type":"ContainerStarted","Data":"44282660a4ececbfdb5828aa7eb4aeb0cc2c61e33a1f26c7cc5af1580f75fda9"}
Oct 06 22:09:56 crc kubenswrapper[5014]: I1006 22:09:56.863895 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-n4lmq"]
Oct 06 22:09:56 crc kubenswrapper[5014]: I1006 22:09:56.866956 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:09:56 crc kubenswrapper[5014]: I1006 22:09:56.872363 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n4lmq"]
Oct 06 22:09:56 crc kubenswrapper[5014]: I1006 22:09:56.881519 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-utilities\") pod \"community-operators-n4lmq\" (UID: \"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb\") " pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:09:56 crc kubenswrapper[5014]: I1006 22:09:56.881602 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-catalog-content\") pod \"community-operators-n4lmq\" (UID: \"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb\") " pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:09:56 crc kubenswrapper[5014]: I1006 22:09:56.881970 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gwvm\" (UniqueName: \"kubernetes.io/projected/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-kube-api-access-2gwvm\") pod \"community-operators-n4lmq\" (UID: \"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb\") " pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:09:56 crc kubenswrapper[5014]: I1006 22:09:56.915333 5014 generic.go:334] "Generic (PLEG): container finished" podID="b0ac1329-4252-466d-bbe1-71a42dd0c5ac" containerID="44282660a4ececbfdb5828aa7eb4aeb0cc2c61e33a1f26c7cc5af1580f75fda9" exitCode=0
Oct 06 22:09:56 crc kubenswrapper[5014]: I1006 22:09:56.915378 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2ks56" event={"ID":"b0ac1329-4252-466d-bbe1-71a42dd0c5ac","Type":"ContainerDied","Data":"44282660a4ececbfdb5828aa7eb4aeb0cc2c61e33a1f26c7cc5af1580f75fda9"}
Oct 06 22:09:56 crc kubenswrapper[5014]: I1006 22:09:56.983312 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-catalog-content\") pod \"community-operators-n4lmq\" (UID: \"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb\") " pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:09:56 crc kubenswrapper[5014]: I1006 22:09:56.983431 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gwvm\" (UniqueName: \"kubernetes.io/projected/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-kube-api-access-2gwvm\") pod \"community-operators-n4lmq\" (UID: \"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb\") " pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:09:56 crc kubenswrapper[5014]: I1006 22:09:56.983500 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-utilities\") pod \"community-operators-n4lmq\" (UID: \"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb\") " pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:09:56 crc kubenswrapper[5014]: I1006 22:09:56.984020 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-utilities\") pod \"community-operators-n4lmq\" (UID: \"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb\") " pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:09:56 crc kubenswrapper[5014]: I1006 22:09:56.984318 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-catalog-content\") pod \"community-operators-n4lmq\" (UID: \"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb\") " pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:09:57 crc kubenswrapper[5014]: I1006 22:09:57.008930 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2gwvm\" (UniqueName: \"kubernetes.io/projected/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-kube-api-access-2gwvm\") pod \"community-operators-n4lmq\" (UID: \"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb\") " pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:09:57 crc kubenswrapper[5014]: I1006 22:09:57.197237 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:09:57 crc kubenswrapper[5014]: I1006 22:09:57.568433 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n4lmq"]
Oct 06 22:09:57 crc kubenswrapper[5014]: W1006 22:09:57.569834 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda8ae1b67_2a0f_4c16_95d4_b0cd9f0606bb.slice/crio-3af39a7f46177c6b8552f75874a89391a91f5e561670344411ae3a34f09dfe5b WatchSource:0}: Error finding container 3af39a7f46177c6b8552f75874a89391a91f5e561670344411ae3a34f09dfe5b: Status 404 returned error can't find the container with id 3af39a7f46177c6b8552f75874a89391a91f5e561670344411ae3a34f09dfe5b
Oct 06 22:09:57 crc kubenswrapper[5014]: I1006 22:09:57.926919 5014 generic.go:334] "Generic (PLEG): container finished" podID="a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb" containerID="dbcb5a8407338b9c055b487276bfdd1a6104c750024fe92847b71344dcadef40" exitCode=0
Oct 06 22:09:57 crc kubenswrapper[5014]: I1006 22:09:57.927133 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4lmq" event={"ID":"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb","Type":"ContainerDied","Data":"dbcb5a8407338b9c055b487276bfdd1a6104c750024fe92847b71344dcadef40"}
Oct 06 22:09:57 crc kubenswrapper[5014]: I1006 22:09:57.927158 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4lmq" event={"ID":"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb","Type":"ContainerStarted","Data":"3af39a7f46177c6b8552f75874a89391a91f5e561670344411ae3a34f09dfe5b"}
Oct 06 22:09:57 crc kubenswrapper[5014]: I1006 22:09:57.931743 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2ks56" event={"ID":"b0ac1329-4252-466d-bbe1-71a42dd0c5ac","Type":"ContainerStarted","Data":"72bdb8edb91c9a347ba239f54ccd0e5b5a19f610ba94661778c981dc8718557d"}
Oct 06 22:09:59 crc kubenswrapper[5014]: I1006 22:09:59.966714 5014 generic.go:334] "Generic (PLEG): container finished" podID="a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb" containerID="b358e1c2c1193eff550303664212ddaf4175b4a5c5cecfd6ea19c600ccd7c44e" exitCode=0
Oct 06 22:09:59 crc kubenswrapper[5014]: I1006 22:09:59.966774 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4lmq" event={"ID":"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb","Type":"ContainerDied","Data":"b358e1c2c1193eff550303664212ddaf4175b4a5c5cecfd6ea19c600ccd7c44e"}
Oct 06 22:09:59 crc kubenswrapper[5014]: I1006 22:09:59.995255 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2ks56" podStartSLOduration=4.51330296 podStartE2EDuration="6.995229979s" podCreationTimestamp="2025-10-06 22:09:53 +0000 UTC" firstStartedPulling="2025-10-06 22:09:54.892253085 +0000 UTC m=+2340.185289819" lastFinishedPulling="2025-10-06 22:09:57.374180104 +0000 UTC m=+2342.667216838" observedRunningTime="2025-10-06 22:09:57.982474049 +0000 UTC m=+2343.275510803" watchObservedRunningTime="2025-10-06 22:09:59.995229979 +0000 UTC m=+2345.288266723"
Oct 06 22:10:00 crc kubenswrapper[5014]: I1006 22:10:00.977974 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4lmq" event={"ID":"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb","Type":"ContainerStarted","Data":"5f025d6b28fc1111d0b9a1393f85449bce464bfc73df5c608b484e17d3ca5db1"}
Oct 06 22:10:01 crc kubenswrapper[5014]: I1006 22:10:01.009893 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-n4lmq" podStartSLOduration=2.5287032419999997 podStartE2EDuration="5.009872758s" podCreationTimestamp="2025-10-06 22:09:56 +0000 UTC" firstStartedPulling="2025-10-06 22:09:57.928352337 +0000 UTC m=+2343.221389071" lastFinishedPulling="2025-10-06 22:10:00.409521843 +0000 UTC m=+2345.702558587" observedRunningTime="2025-10-06 22:10:01.004654234 +0000 UTC m=+2346.297691008" watchObservedRunningTime="2025-10-06 22:10:01.009872758 +0000 UTC m=+2346.302909492"
Oct 06 22:10:04 crc kubenswrapper[5014]: I1006 22:10:04.195015 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2ks56"
Oct 06 22:10:04 crc kubenswrapper[5014]: I1006 22:10:04.195546 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2ks56"
Oct 06 22:10:04 crc kubenswrapper[5014]: I1006 22:10:04.274837 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2ks56"
Oct 06 22:10:05 crc kubenswrapper[5014]: I1006 22:10:05.087450 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2ks56"
Oct 06 22:10:05 crc kubenswrapper[5014]: I1006 22:10:05.148492 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2ks56"]
Oct 06 22:10:07 crc kubenswrapper[5014]: I1006 22:10:07.036976 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2ks56" podUID="b0ac1329-4252-466d-bbe1-71a42dd0c5ac" containerName="registry-server" containerID="cri-o://72bdb8edb91c9a347ba239f54ccd0e5b5a19f610ba94661778c981dc8718557d" gracePeriod=2
Oct 06 22:10:07 crc kubenswrapper[5014]: I1006 22:10:07.197677 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:10:07 crc kubenswrapper[5014]: I1006 22:10:07.197997 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:10:07 crc kubenswrapper[5014]: I1006 22:10:07.274708 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.026414 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2ks56"
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.056413 5014 generic.go:334] "Generic (PLEG): container finished" podID="b0ac1329-4252-466d-bbe1-71a42dd0c5ac" containerID="72bdb8edb91c9a347ba239f54ccd0e5b5a19f610ba94661778c981dc8718557d" exitCode=0
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.056489 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2ks56"
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.056546 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2ks56" event={"ID":"b0ac1329-4252-466d-bbe1-71a42dd0c5ac","Type":"ContainerDied","Data":"72bdb8edb91c9a347ba239f54ccd0e5b5a19f610ba94661778c981dc8718557d"}
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.056778 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2ks56" event={"ID":"b0ac1329-4252-466d-bbe1-71a42dd0c5ac","Type":"ContainerDied","Data":"6a1afedf0cc42f8ac28817502704b341600baf491dab54c2b5e1259dd24b822f"}
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.056879 5014 scope.go:117] "RemoveContainer" containerID="72bdb8edb91c9a347ba239f54ccd0e5b5a19f610ba94661778c981dc8718557d"
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.094924 5014 scope.go:117] "RemoveContainer" containerID="44282660a4ececbfdb5828aa7eb4aeb0cc2c61e33a1f26c7cc5af1580f75fda9"
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.115435 5014 scope.go:117] "RemoveContainer" containerID="921dc1552886addb8fcff5f3e4b8d259cae5c4a7ef648078ad682838f450de15"
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.123291 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.137847 5014 scope.go:117] "RemoveContainer" containerID="72bdb8edb91c9a347ba239f54ccd0e5b5a19f610ba94661778c981dc8718557d"
Oct 06 22:10:08 crc kubenswrapper[5014]: E1006 22:10:08.138551 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72bdb8edb91c9a347ba239f54ccd0e5b5a19f610ba94661778c981dc8718557d\": container with ID starting with 72bdb8edb91c9a347ba239f54ccd0e5b5a19f610ba94661778c981dc8718557d not found: ID does not exist" containerID="72bdb8edb91c9a347ba239f54ccd0e5b5a19f610ba94661778c981dc8718557d"
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.138657 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72bdb8edb91c9a347ba239f54ccd0e5b5a19f610ba94661778c981dc8718557d"} err="failed to get container status \"72bdb8edb91c9a347ba239f54ccd0e5b5a19f610ba94661778c981dc8718557d\": rpc error: code = NotFound desc = could not find container \"72bdb8edb91c9a347ba239f54ccd0e5b5a19f610ba94661778c981dc8718557d\": container with ID starting with 72bdb8edb91c9a347ba239f54ccd0e5b5a19f610ba94661778c981dc8718557d not found: ID does not exist"
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.138720 5014 scope.go:117] "RemoveContainer" containerID="44282660a4ececbfdb5828aa7eb4aeb0cc2c61e33a1f26c7cc5af1580f75fda9"
Oct 06 22:10:08 crc kubenswrapper[5014]: E1006 22:10:08.139275 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44282660a4ececbfdb5828aa7eb4aeb0cc2c61e33a1f26c7cc5af1580f75fda9\": container with ID starting with 44282660a4ececbfdb5828aa7eb4aeb0cc2c61e33a1f26c7cc5af1580f75fda9 not found: ID does not exist" containerID="44282660a4ececbfdb5828aa7eb4aeb0cc2c61e33a1f26c7cc5af1580f75fda9"
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.139311 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44282660a4ececbfdb5828aa7eb4aeb0cc2c61e33a1f26c7cc5af1580f75fda9"} err="failed to get container status \"44282660a4ececbfdb5828aa7eb4aeb0cc2c61e33a1f26c7cc5af1580f75fda9\": rpc error: code = NotFound desc = could not find container \"44282660a4ececbfdb5828aa7eb4aeb0cc2c61e33a1f26c7cc5af1580f75fda9\": container with ID starting with 44282660a4ececbfdb5828aa7eb4aeb0cc2c61e33a1f26c7cc5af1580f75fda9 not found: ID does not exist"
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.139357 5014 scope.go:117] "RemoveContainer" containerID="921dc1552886addb8fcff5f3e4b8d259cae5c4a7ef648078ad682838f450de15"
Oct 06 22:10:08 crc kubenswrapper[5014]: E1006 22:10:08.143920 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"921dc1552886addb8fcff5f3e4b8d259cae5c4a7ef648078ad682838f450de15\": container with ID starting with 921dc1552886addb8fcff5f3e4b8d259cae5c4a7ef648078ad682838f450de15 not found: ID does not exist" containerID="921dc1552886addb8fcff5f3e4b8d259cae5c4a7ef648078ad682838f450de15"
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.143982 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"921dc1552886addb8fcff5f3e4b8d259cae5c4a7ef648078ad682838f450de15"} err="failed to get container status \"921dc1552886addb8fcff5f3e4b8d259cae5c4a7ef648078ad682838f450de15\": rpc error: code = NotFound desc = could not find container \"921dc1552886addb8fcff5f3e4b8d259cae5c4a7ef648078ad682838f450de15\": container with ID starting with 921dc1552886addb8fcff5f3e4b8d259cae5c4a7ef648078ad682838f450de15 not found: ID does not exist"
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.170179 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-catalog-content\") pod \"b0ac1329-4252-466d-bbe1-71a42dd0c5ac\" (UID: \"b0ac1329-4252-466d-bbe1-71a42dd0c5ac\") "
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.170241 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slqrf\" (UniqueName: \"kubernetes.io/projected/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-kube-api-access-slqrf\") pod \"b0ac1329-4252-466d-bbe1-71a42dd0c5ac\" (UID: \"b0ac1329-4252-466d-bbe1-71a42dd0c5ac\") "
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.170330 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-utilities\") pod \"b0ac1329-4252-466d-bbe1-71a42dd0c5ac\" (UID: \"b0ac1329-4252-466d-bbe1-71a42dd0c5ac\") "
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.171471 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-utilities" (OuterVolumeSpecName: "utilities") pod "b0ac1329-4252-466d-bbe1-71a42dd0c5ac" (UID: "b0ac1329-4252-466d-bbe1-71a42dd0c5ac"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.176640 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-kube-api-access-slqrf" (OuterVolumeSpecName: "kube-api-access-slqrf") pod "b0ac1329-4252-466d-bbe1-71a42dd0c5ac" (UID: "b0ac1329-4252-466d-bbe1-71a42dd0c5ac"). InnerVolumeSpecName "kube-api-access-slqrf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.272471 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.272534 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slqrf\" (UniqueName: \"kubernetes.io/projected/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-kube-api-access-slqrf\") on node \"crc\" DevicePath \"\""
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.277087 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b0ac1329-4252-466d-bbe1-71a42dd0c5ac" (UID: "b0ac1329-4252-466d-bbe1-71a42dd0c5ac"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.374007 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0ac1329-4252-466d-bbe1-71a42dd0c5ac-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.411399 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2ks56"]
Oct 06 22:10:08 crc kubenswrapper[5014]: I1006 22:10:08.422258 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2ks56"]
Oct 06 22:10:09 crc kubenswrapper[5014]: I1006 22:10:09.500823 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0ac1329-4252-466d-bbe1-71a42dd0c5ac" path="/var/lib/kubelet/pods/b0ac1329-4252-466d-bbe1-71a42dd0c5ac/volumes"
Oct 06 22:10:10 crc kubenswrapper[5014]: I1006 22:10:10.446165 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n4lmq"]
Oct 06 22:10:11 crc kubenswrapper[5014]: I1006 22:10:11.088919 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-n4lmq" podUID="a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb" containerName="registry-server" containerID="cri-o://5f025d6b28fc1111d0b9a1393f85449bce464bfc73df5c608b484e17d3ca5db1" gracePeriod=2
Oct 06 22:10:11 crc kubenswrapper[5014]: I1006 22:10:11.656277 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:10:11 crc kubenswrapper[5014]: I1006 22:10:11.830787 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2gwvm\" (UniqueName: \"kubernetes.io/projected/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-kube-api-access-2gwvm\") pod \"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb\" (UID: \"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb\") "
Oct 06 22:10:11 crc kubenswrapper[5014]: I1006 22:10:11.830909 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-catalog-content\") pod \"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb\" (UID: \"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb\") "
Oct 06 22:10:11 crc kubenswrapper[5014]: I1006 22:10:11.831023 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-utilities\") pod \"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb\" (UID: \"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb\") "
Oct 06 22:10:11 crc kubenswrapper[5014]: I1006 22:10:11.832732 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-utilities" (OuterVolumeSpecName: "utilities") pod "a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb" (UID: "a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 22:10:11 crc kubenswrapper[5014]: I1006 22:10:11.837798 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-kube-api-access-2gwvm" (OuterVolumeSpecName: "kube-api-access-2gwvm") pod "a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb" (UID: "a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb"). InnerVolumeSpecName "kube-api-access-2gwvm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:10:11 crc kubenswrapper[5014]: I1006 22:10:11.902173 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb" (UID: "a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 22:10:11 crc kubenswrapper[5014]: I1006 22:10:11.932599 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2gwvm\" (UniqueName: \"kubernetes.io/projected/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-kube-api-access-2gwvm\") on node \"crc\" DevicePath \"\""
Oct 06 22:10:11 crc kubenswrapper[5014]: I1006 22:10:11.932666 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 22:10:11 crc kubenswrapper[5014]: I1006 22:10:11.932679 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 22:10:12 crc kubenswrapper[5014]: I1006 22:10:12.100380 5014 generic.go:334] "Generic (PLEG): container finished" podID="a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb" containerID="5f025d6b28fc1111d0b9a1393f85449bce464bfc73df5c608b484e17d3ca5db1" exitCode=0
Oct 06 22:10:12 crc kubenswrapper[5014]: I1006 22:10:12.100431 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4lmq" event={"ID":"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb","Type":"ContainerDied","Data":"5f025d6b28fc1111d0b9a1393f85449bce464bfc73df5c608b484e17d3ca5db1"}
Oct 06 22:10:12 crc kubenswrapper[5014]: I1006 22:10:12.100521 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n4lmq" event={"ID":"a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb","Type":"ContainerDied","Data":"3af39a7f46177c6b8552f75874a89391a91f5e561670344411ae3a34f09dfe5b"}
Oct 06 22:10:12 crc kubenswrapper[5014]: I1006 22:10:12.100560 5014 scope.go:117] "RemoveContainer" containerID="5f025d6b28fc1111d0b9a1393f85449bce464bfc73df5c608b484e17d3ca5db1"
Oct 06 22:10:12 crc kubenswrapper[5014]: I1006 22:10:12.100446 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n4lmq"
Oct 06 22:10:12 crc kubenswrapper[5014]: I1006 22:10:12.126419 5014 scope.go:117] "RemoveContainer" containerID="b358e1c2c1193eff550303664212ddaf4175b4a5c5cecfd6ea19c600ccd7c44e"
Oct 06 22:10:12 crc kubenswrapper[5014]: I1006 22:10:12.144300 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n4lmq"]
Oct 06 22:10:12 crc kubenswrapper[5014]: I1006 22:10:12.149758 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-n4lmq"]
Oct 06 22:10:12 crc kubenswrapper[5014]: I1006 22:10:12.175682 5014 scope.go:117] "RemoveContainer" containerID="dbcb5a8407338b9c055b487276bfdd1a6104c750024fe92847b71344dcadef40"
Oct 06 22:10:12 crc kubenswrapper[5014]: I1006 22:10:12.195805 5014 scope.go:117] "RemoveContainer" containerID="5f025d6b28fc1111d0b9a1393f85449bce464bfc73df5c608b484e17d3ca5db1"
Oct 06 22:10:12 crc kubenswrapper[5014]: E1006 22:10:12.196332 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f025d6b28fc1111d0b9a1393f85449bce464bfc73df5c608b484e17d3ca5db1\": container with ID starting with 5f025d6b28fc1111d0b9a1393f85449bce464bfc73df5c608b484e17d3ca5db1 not found: ID does not exist" containerID="5f025d6b28fc1111d0b9a1393f85449bce464bfc73df5c608b484e17d3ca5db1"
Oct 06 22:10:12 crc kubenswrapper[5014]: I1006 22:10:12.196385 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f025d6b28fc1111d0b9a1393f85449bce464bfc73df5c608b484e17d3ca5db1"} err="failed to get container status \"5f025d6b28fc1111d0b9a1393f85449bce464bfc73df5c608b484e17d3ca5db1\": rpc error: code = NotFound desc = could not find container \"5f025d6b28fc1111d0b9a1393f85449bce464bfc73df5c608b484e17d3ca5db1\": container with ID starting with 5f025d6b28fc1111d0b9a1393f85449bce464bfc73df5c608b484e17d3ca5db1 not found: ID does not exist"
Oct 06 22:10:12 crc kubenswrapper[5014]: I1006 22:10:12.196412 5014 scope.go:117] "RemoveContainer" containerID="b358e1c2c1193eff550303664212ddaf4175b4a5c5cecfd6ea19c600ccd7c44e"
Oct 06 22:10:12 crc kubenswrapper[5014]: E1006 22:10:12.196851 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b358e1c2c1193eff550303664212ddaf4175b4a5c5cecfd6ea19c600ccd7c44e\": container with ID starting with b358e1c2c1193eff550303664212ddaf4175b4a5c5cecfd6ea19c600ccd7c44e not found: ID does not exist" containerID="b358e1c2c1193eff550303664212ddaf4175b4a5c5cecfd6ea19c600ccd7c44e"
Oct 06 22:10:12 crc kubenswrapper[5014]: I1006 22:10:12.196877 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b358e1c2c1193eff550303664212ddaf4175b4a5c5cecfd6ea19c600ccd7c44e"} err="failed to get container status \"b358e1c2c1193eff550303664212ddaf4175b4a5c5cecfd6ea19c600ccd7c44e\": rpc error: code = NotFound desc = could not find container \"b358e1c2c1193eff550303664212ddaf4175b4a5c5cecfd6ea19c600ccd7c44e\": container with ID starting with b358e1c2c1193eff550303664212ddaf4175b4a5c5cecfd6ea19c600ccd7c44e not found: ID does not exist"
Oct 06 22:10:12 crc kubenswrapper[5014]: I1006 22:10:12.196921 5014 scope.go:117] "RemoveContainer" containerID="dbcb5a8407338b9c055b487276bfdd1a6104c750024fe92847b71344dcadef40"
Oct 06 22:10:12 crc kubenswrapper[5014]: E1006 22:10:12.197172 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbcb5a8407338b9c055b487276bfdd1a6104c750024fe92847b71344dcadef40\": container with ID starting with dbcb5a8407338b9c055b487276bfdd1a6104c750024fe92847b71344dcadef40 not found: ID does not exist" containerID="dbcb5a8407338b9c055b487276bfdd1a6104c750024fe92847b71344dcadef40"
Oct 06 22:10:12 crc kubenswrapper[5014]: I1006 22:10:12.197191 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbcb5a8407338b9c055b487276bfdd1a6104c750024fe92847b71344dcadef40"} err="failed to get container status \"dbcb5a8407338b9c055b487276bfdd1a6104c750024fe92847b71344dcadef40\": rpc error: code = NotFound desc = could not find container \"dbcb5a8407338b9c055b487276bfdd1a6104c750024fe92847b71344dcadef40\": container with ID starting with dbcb5a8407338b9c055b487276bfdd1a6104c750024fe92847b71344dcadef40 not found: ID does not exist"
Oct 06 22:10:13 crc kubenswrapper[5014]: I1006 22:10:13.520111 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb" path="/var/lib/kubelet/pods/a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb/volumes"
Oct 06 22:10:21 crc kubenswrapper[5014]: I1006 22:10:21.735004 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 22:10:21 crc kubenswrapper[5014]: I1006 22:10:21.736014 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 22:10:21 crc kubenswrapper[5014]: I1006 22:10:21.736091 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths"
Oct 06 22:10:21 crc kubenswrapper[5014]: I1006 22:10:21.736911 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 06 22:10:21 crc kubenswrapper[5014]: I1006 22:10:21.737014 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6" gracePeriod=600
Oct 06 22:10:21 crc kubenswrapper[5014]: E1006 22:10:21.880313 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:10:22 crc kubenswrapper[5014]: I1006 22:10:22.218377 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6" exitCode=0
Oct 06 22:10:22 crc kubenswrapper[5014]: I1006 22:10:22.218478 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"}
Oct 06 22:10:22 crc kubenswrapper[5014]: I1006 22:10:22.218586 5014 scope.go:117] "RemoveContainer" containerID="c1106c1c6304da4faf8da0a255de2a35cc578d87197c127c1e338edccadc53df"
Oct 06 22:10:22 crc kubenswrapper[5014]: I1006 22:10:22.219270 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:10:22 crc kubenswrapper[5014]: E1006 22:10:22.219574 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:10:37 crc kubenswrapper[5014]: I1006 22:10:37.484906 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:10:37 crc kubenswrapper[5014]: E1006 22:10:37.485792 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:10:52 crc kubenswrapper[5014]: I1006 22:10:52.485563 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:10:52 crc kubenswrapper[5014]: E1006 22:10:52.486930 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:11:06 crc kubenswrapper[5014]: I1006 22:11:06.484614 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:11:06 crc kubenswrapper[5014]: E1006 22:11:06.485567 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:11:18 crc kubenswrapper[5014]: I1006 22:11:18.484774 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:11:18 crc kubenswrapper[5014]: E1006 22:11:18.485816 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:11:29 crc kubenswrapper[5014]: I1006 22:11:29.484682 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:11:29 crc kubenswrapper[5014]: E1006 22:11:29.485676 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:11:44 crc kubenswrapper[5014]: I1006 22:11:44.484780 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:11:44 crc kubenswrapper[5014]: E1006 22:11:44.485727 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:11:56 crc kubenswrapper[5014]: I1006 22:11:56.484674 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:11:56 crc kubenswrapper[5014]: E1006 22:11:56.486108 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:12:07 crc kubenswrapper[5014]: I1006 22:12:07.485141 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:12:07 crc kubenswrapper[5014]: E1006 22:12:07.486476 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:12:22 crc kubenswrapper[5014]: I1006 22:12:22.484552 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:12:22 crc kubenswrapper[5014]: E1006 22:12:22.485573 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:12:35 crc kubenswrapper[5014]: I1006 22:12:35.489366 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:12:35 crc kubenswrapper[5014]: E1006 22:12:35.490061 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:12:49 crc kubenswrapper[5014]: I1006 22:12:49.485985 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:12:49 crc kubenswrapper[5014]: E1006 22:12:49.487395 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:13:04 crc kubenswrapper[5014]: I1006 22:13:04.484786 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:13:04 crc kubenswrapper[5014]: E1006 22:13:04.485605 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:13:15 crc kubenswrapper[5014]: I1006 22:13:15.492685 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:13:15 crc kubenswrapper[5014]: E1006 22:13:15.493814 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:13:29 crc kubenswrapper[5014]: I1006 22:13:29.485524 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:13:29 crc kubenswrapper[5014]: E1006 22:13:29.486701 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:13:43 crc kubenswrapper[5014]: I1006 22:13:43.485506 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:13:43 crc kubenswrapper[5014]: E1006 22:13:43.488817 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:13:57 crc kubenswrapper[5014]: I1006 22:13:57.485549 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:13:57 crc kubenswrapper[5014]: E1006 22:13:57.486576 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:14:08 crc kubenswrapper[5014]: I1006 22:14:08.485199 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:14:08 crc kubenswrapper[5014]: E1006 22:14:08.486211 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:14:23 crc kubenswrapper[5014]: I1006 22:14:23.485948 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:14:23 crc kubenswrapper[5014]: E1006 22:14:23.487000 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:14:35 crc kubenswrapper[5014]: I1006 22:14:35.493290 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:14:35 crc kubenswrapper[5014]: E1006 22:14:35.494128 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:14:49 crc kubenswrapper[5014]: I1006 22:14:49.485296 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:14:49 crc kubenswrapper[5014]: E1006 22:14:49.486405 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.168542 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"]
Oct 06 22:15:00 crc kubenswrapper[5014]: E1006 22:15:00.169762 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0ac1329-4252-466d-bbe1-71a42dd0c5ac" containerName="extract-utilities"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.169783 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0ac1329-4252-466d-bbe1-71a42dd0c5ac" containerName="extract-utilities"
Oct 06 22:15:00 crc kubenswrapper[5014]: E1006 22:15:00.169802 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb" containerName="extract-content"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.169812 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb" containerName="extract-content"
Oct 06 22:15:00 crc kubenswrapper[5014]: E1006 22:15:00.169829 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb" containerName="registry-server"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.169841 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb" containerName="registry-server"
Oct 06 22:15:00 crc kubenswrapper[5014]: E1006 22:15:00.169861 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0ac1329-4252-466d-bbe1-71a42dd0c5ac" containerName="extract-content"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.169872 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0ac1329-4252-466d-bbe1-71a42dd0c5ac" containerName="extract-content"
Oct 06 22:15:00 crc kubenswrapper[5014]: E1006 22:15:00.169904 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb" containerName="extract-utilities"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.169914 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb" containerName="extract-utilities"
Oct 06 22:15:00 crc kubenswrapper[5014]: E1006 22:15:00.169932 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0ac1329-4252-466d-bbe1-71a42dd0c5ac" containerName="registry-server"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.169941 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0ac1329-4252-466d-bbe1-71a42dd0c5ac" containerName="registry-server"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.170185 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8ae1b67-2a0f-4c16-95d4-b0cd9f0606bb" containerName="registry-server"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.170209 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0ac1329-4252-466d-bbe1-71a42dd0c5ac" containerName="registry-server"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.170823 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.174578 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.174654 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.192828 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"]
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.268385 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/387f2808-42e5-4664-80cf-9c267b71a798-config-volume\") pod \"collect-profiles-29329815-s8g4s\" (UID: \"387f2808-42e5-4664-80cf-9c267b71a798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.268428 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/387f2808-42e5-4664-80cf-9c267b71a798-secret-volume\") pod \"collect-profiles-29329815-s8g4s\" (UID: \"387f2808-42e5-4664-80cf-9c267b71a798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.268466 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhcc7\" (UniqueName: \"kubernetes.io/projected/387f2808-42e5-4664-80cf-9c267b71a798-kube-api-access-rhcc7\") pod \"collect-profiles-29329815-s8g4s\" (UID: \"387f2808-42e5-4664-80cf-9c267b71a798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.369883 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/387f2808-42e5-4664-80cf-9c267b71a798-config-volume\") pod \"collect-profiles-29329815-s8g4s\" (UID: \"387f2808-42e5-4664-80cf-9c267b71a798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.369934 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/387f2808-42e5-4664-80cf-9c267b71a798-secret-volume\") pod \"collect-profiles-29329815-s8g4s\" (UID: \"387f2808-42e5-4664-80cf-9c267b71a798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.369962 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhcc7\" (UniqueName: \"kubernetes.io/projected/387f2808-42e5-4664-80cf-9c267b71a798-kube-api-access-rhcc7\") pod \"collect-profiles-29329815-s8g4s\" (UID: \"387f2808-42e5-4664-80cf-9c267b71a798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.371564 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/387f2808-42e5-4664-80cf-9c267b71a798-config-volume\") pod \"collect-profiles-29329815-s8g4s\" (UID: \"387f2808-42e5-4664-80cf-9c267b71a798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.377266 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/387f2808-42e5-4664-80cf-9c267b71a798-secret-volume\") pod \"collect-profiles-29329815-s8g4s\" (UID: \"387f2808-42e5-4664-80cf-9c267b71a798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.407004 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhcc7\" (UniqueName: \"kubernetes.io/projected/387f2808-42e5-4664-80cf-9c267b71a798-kube-api-access-rhcc7\") pod \"collect-profiles-29329815-s8g4s\" (UID: \"387f2808-42e5-4664-80cf-9c267b71a798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.498220 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"
Oct 06 22:15:00 crc kubenswrapper[5014]: I1006 22:15:00.983037 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"]
Oct 06 22:15:01 crc kubenswrapper[5014]: I1006 22:15:01.921752 5014 generic.go:334] "Generic (PLEG): container finished" podID="387f2808-42e5-4664-80cf-9c267b71a798" containerID="1b6d9e60a4d5cd0c545344dd95387e69e0a6def9295eddbeeb2ac73004266c92" exitCode=0
Oct 06 22:15:01 crc kubenswrapper[5014]: I1006 22:15:01.921838 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s" event={"ID":"387f2808-42e5-4664-80cf-9c267b71a798","Type":"ContainerDied","Data":"1b6d9e60a4d5cd0c545344dd95387e69e0a6def9295eddbeeb2ac73004266c92"}
Oct 06 22:15:01 crc kubenswrapper[5014]: I1006 22:15:01.922283 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s" event={"ID":"387f2808-42e5-4664-80cf-9c267b71a798","Type":"ContainerStarted","Data":"8026c8df08998e30ff1c9c12e6669104c974496fc71e0a8d7fffbdc686e7b1cf"}
Oct 06 22:15:03 crc kubenswrapper[5014]: I1006 22:15:03.325401 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"
Oct 06 22:15:03 crc kubenswrapper[5014]: I1006 22:15:03.430287 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/387f2808-42e5-4664-80cf-9c267b71a798-config-volume\") pod \"387f2808-42e5-4664-80cf-9c267b71a798\" (UID: \"387f2808-42e5-4664-80cf-9c267b71a798\") "
Oct 06 22:15:03 crc kubenswrapper[5014]: I1006 22:15:03.430353 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhcc7\" (UniqueName: \"kubernetes.io/projected/387f2808-42e5-4664-80cf-9c267b71a798-kube-api-access-rhcc7\") pod \"387f2808-42e5-4664-80cf-9c267b71a798\" (UID: \"387f2808-42e5-4664-80cf-9c267b71a798\") "
Oct 06 22:15:03 crc kubenswrapper[5014]: I1006 22:15:03.430406 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/387f2808-42e5-4664-80cf-9c267b71a798-secret-volume\") pod \"387f2808-42e5-4664-80cf-9c267b71a798\" (UID: \"387f2808-42e5-4664-80cf-9c267b71a798\") "
Oct 06 22:15:03 crc kubenswrapper[5014]: I1006 22:15:03.431864 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/387f2808-42e5-4664-80cf-9c267b71a798-config-volume" (OuterVolumeSpecName: "config-volume") pod "387f2808-42e5-4664-80cf-9c267b71a798" (UID: "387f2808-42e5-4664-80cf-9c267b71a798"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 22:15:03 crc kubenswrapper[5014]: I1006 22:15:03.436732 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/387f2808-42e5-4664-80cf-9c267b71a798-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "387f2808-42e5-4664-80cf-9c267b71a798" (UID: "387f2808-42e5-4664-80cf-9c267b71a798"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 06 22:15:03 crc kubenswrapper[5014]: I1006 22:15:03.437525 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/387f2808-42e5-4664-80cf-9c267b71a798-kube-api-access-rhcc7" (OuterVolumeSpecName: "kube-api-access-rhcc7") pod "387f2808-42e5-4664-80cf-9c267b71a798" (UID: "387f2808-42e5-4664-80cf-9c267b71a798"). InnerVolumeSpecName "kube-api-access-rhcc7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:15:03 crc kubenswrapper[5014]: I1006 22:15:03.532602 5014 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/387f2808-42e5-4664-80cf-9c267b71a798-config-volume\") on node \"crc\" DevicePath \"\""
Oct 06 22:15:03 crc kubenswrapper[5014]: I1006 22:15:03.532661 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhcc7\" (UniqueName: \"kubernetes.io/projected/387f2808-42e5-4664-80cf-9c267b71a798-kube-api-access-rhcc7\") on node \"crc\" DevicePath \"\""
Oct 06 22:15:03 crc kubenswrapper[5014]: I1006 22:15:03.532674 5014 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/387f2808-42e5-4664-80cf-9c267b71a798-secret-volume\") on node \"crc\" DevicePath \"\""
Oct 06 22:15:03 crc kubenswrapper[5014]: I1006 22:15:03.946735 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s" event={"ID":"387f2808-42e5-4664-80cf-9c267b71a798","Type":"ContainerDied","Data":"8026c8df08998e30ff1c9c12e6669104c974496fc71e0a8d7fffbdc686e7b1cf"}
Oct 06 22:15:03 crc kubenswrapper[5014]: I1006 22:15:03.946800 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8026c8df08998e30ff1c9c12e6669104c974496fc71e0a8d7fffbdc686e7b1cf"
Oct 06 22:15:03 crc kubenswrapper[5014]: I1006 22:15:03.946834 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"
Oct 06 22:15:04 crc kubenswrapper[5014]: I1006 22:15:04.424917 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb"]
Oct 06 22:15:04 crc kubenswrapper[5014]: I1006 22:15:04.434282 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329770-k9xbb"]
Oct 06 22:15:04 crc kubenswrapper[5014]: I1006 22:15:04.484858 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:15:04 crc kubenswrapper[5014]: E1006 22:15:04.485305 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:15:05 crc kubenswrapper[5014]: I1006 22:15:05.500882 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e659c79-33c6-49d3-a333-5280ece9fa5b" path="/var/lib/kubelet/pods/1e659c79-33c6-49d3-a333-5280ece9fa5b/volumes"
Oct 06 22:15:15 crc kubenswrapper[5014]: I1006 22:15:15.493149 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:15:15 crc kubenswrapper[5014]: E1006 22:15:15.494170 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:15:30 crc kubenswrapper[5014]: I1006 22:15:30.484974 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6"
Oct 06 22:15:31 crc kubenswrapper[5014]: I1006 22:15:31.210716 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"dcc0d26c271b15739956d985f7ad3120a8d4e8910b238b26753fb5936d6e11b2"}
Oct 06 22:15:37 crc kubenswrapper[5014]: I1006 22:15:37.863827 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ll4nd"]
Oct 06 22:15:37 crc kubenswrapper[5014]: E1006 22:15:37.865384 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="387f2808-42e5-4664-80cf-9c267b71a798" containerName="collect-profiles"
Oct 06 22:15:37 crc kubenswrapper[5014]: I1006 22:15:37.865402 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="387f2808-42e5-4664-80cf-9c267b71a798" containerName="collect-profiles"
Oct 06 22:15:37 crc kubenswrapper[5014]: I1006 22:15:37.866449 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="387f2808-42e5-4664-80cf-9c267b71a798" containerName="collect-profiles"
Oct 06 22:15:37 crc kubenswrapper[5014]: I1006 22:15:37.867667 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ll4nd"
Oct 06 22:15:37 crc kubenswrapper[5014]: I1006 22:15:37.895306 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ll4nd"]
Oct 06 22:15:37 crc kubenswrapper[5014]: I1006 22:15:37.933140 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55d42a51-a88e-467e-ba4f-319c76c880f7-catalog-content\") pod \"redhat-marketplace-ll4nd\" (UID: \"55d42a51-a88e-467e-ba4f-319c76c880f7\") " pod="openshift-marketplace/redhat-marketplace-ll4nd"
Oct 06 22:15:37 crc kubenswrapper[5014]: I1006 22:15:37.933376 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8q9x\" (UniqueName: \"kubernetes.io/projected/55d42a51-a88e-467e-ba4f-319c76c880f7-kube-api-access-h8q9x\") pod \"redhat-marketplace-ll4nd\" (UID: \"55d42a51-a88e-467e-ba4f-319c76c880f7\") " pod="openshift-marketplace/redhat-marketplace-ll4nd"
Oct 06 22:15:37 crc kubenswrapper[5014]: I1006 22:15:37.933727 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55d42a51-a88e-467e-ba4f-319c76c880f7-utilities\") pod \"redhat-marketplace-ll4nd\" (UID: \"55d42a51-a88e-467e-ba4f-319c76c880f7\") " pod="openshift-marketplace/redhat-marketplace-ll4nd"
Oct 06 22:15:38 crc kubenswrapper[5014]: I1006 22:15:38.034861 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55d42a51-a88e-467e-ba4f-319c76c880f7-utilities\") pod \"redhat-marketplace-ll4nd\" (UID: \"55d42a51-a88e-467e-ba4f-319c76c880f7\") " pod="openshift-marketplace/redhat-marketplace-ll4nd"
Oct 06 22:15:38 crc kubenswrapper[5014]: I1006 22:15:38.034990 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55d42a51-a88e-467e-ba4f-319c76c880f7-catalog-content\") pod \"redhat-marketplace-ll4nd\" (UID: \"55d42a51-a88e-467e-ba4f-319c76c880f7\") " pod="openshift-marketplace/redhat-marketplace-ll4nd"
Oct 06 22:15:38 crc kubenswrapper[5014]: I1006 22:15:38.035052 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8q9x\" (UniqueName: \"kubernetes.io/projected/55d42a51-a88e-467e-ba4f-319c76c880f7-kube-api-access-h8q9x\") pod \"redhat-marketplace-ll4nd\" (UID: \"55d42a51-a88e-467e-ba4f-319c76c880f7\") " pod="openshift-marketplace/redhat-marketplace-ll4nd"
Oct 06 22:15:38 crc kubenswrapper[5014]: I1006 22:15:38.035545 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55d42a51-a88e-467e-ba4f-319c76c880f7-utilities\") pod \"redhat-marketplace-ll4nd\" (UID: \"55d42a51-a88e-467e-ba4f-319c76c880f7\") " pod="openshift-marketplace/redhat-marketplace-ll4nd"
Oct 06 22:15:38 crc kubenswrapper[5014]: I1006 22:15:38.035598 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55d42a51-a88e-467e-ba4f-319c76c880f7-catalog-content\") pod \"redhat-marketplace-ll4nd\" (UID: \"55d42a51-a88e-467e-ba4f-319c76c880f7\") " pod="openshift-marketplace/redhat-marketplace-ll4nd"
Oct 06 22:15:38 crc kubenswrapper[5014]: I1006 22:15:38.057753 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8q9x\" (UniqueName: \"kubernetes.io/projected/55d42a51-a88e-467e-ba4f-319c76c880f7-kube-api-access-h8q9x\") pod \"redhat-marketplace-ll4nd\" (UID: \"55d42a51-a88e-467e-ba4f-319c76c880f7\") " pod="openshift-marketplace/redhat-marketplace-ll4nd"
Oct 06 22:15:38 crc kubenswrapper[5014]: I1006 22:15:38.201390 5014 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ll4nd" Oct 06 22:15:38 crc kubenswrapper[5014]: I1006 22:15:38.443639 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ll4nd"] Oct 06 22:15:39 crc kubenswrapper[5014]: I1006 22:15:39.289117 5014 generic.go:334] "Generic (PLEG): container finished" podID="55d42a51-a88e-467e-ba4f-319c76c880f7" containerID="f067955f1d7949768245e4f9c492d1e16a6529efdb29162650436a56d98c13f9" exitCode=0 Oct 06 22:15:39 crc kubenswrapper[5014]: I1006 22:15:39.289238 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ll4nd" event={"ID":"55d42a51-a88e-467e-ba4f-319c76c880f7","Type":"ContainerDied","Data":"f067955f1d7949768245e4f9c492d1e16a6529efdb29162650436a56d98c13f9"} Oct 06 22:15:39 crc kubenswrapper[5014]: I1006 22:15:39.289339 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ll4nd" event={"ID":"55d42a51-a88e-467e-ba4f-319c76c880f7","Type":"ContainerStarted","Data":"5059c477fef57122029e7f0d55b07bd7f88e4bbda6bf5acb507d7bba8a540666"} Oct 06 22:15:39 crc kubenswrapper[5014]: I1006 22:15:39.291604 5014 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 06 22:15:40 crc kubenswrapper[5014]: I1006 22:15:40.304670 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ll4nd" event={"ID":"55d42a51-a88e-467e-ba4f-319c76c880f7","Type":"ContainerDied","Data":"f9f4cff77da37715e56f6cf213ccc1629acd30bb4c39b23c95e754593535d46c"} Oct 06 22:15:40 crc kubenswrapper[5014]: I1006 22:15:40.306766 5014 generic.go:334] "Generic (PLEG): container finished" podID="55d42a51-a88e-467e-ba4f-319c76c880f7" containerID="f9f4cff77da37715e56f6cf213ccc1629acd30bb4c39b23c95e754593535d46c" exitCode=0 Oct 06 22:15:40 crc kubenswrapper[5014]: E1006 22:15:40.411600 5014 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55d42a51_a88e_467e_ba4f_319c76c880f7.slice/crio-conmon-f9f4cff77da37715e56f6cf213ccc1629acd30bb4c39b23c95e754593535d46c.scope\": RecentStats: unable to find data in memory cache]" Oct 06 22:15:41 crc kubenswrapper[5014]: I1006 22:15:41.319844 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ll4nd" event={"ID":"55d42a51-a88e-467e-ba4f-319c76c880f7","Type":"ContainerStarted","Data":"5cc18e7f7553233787ab41aaa034657f73c7b844c3af1c318defc002754cf068"} Oct 06 22:15:48 crc kubenswrapper[5014]: I1006 22:15:48.201696 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ll4nd" Oct 06 22:15:48 crc kubenswrapper[5014]: I1006 22:15:48.202306 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ll4nd" Oct 06 22:15:48 crc kubenswrapper[5014]: I1006 22:15:48.278656 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ll4nd" Oct 06 22:15:48 crc kubenswrapper[5014]: I1006 22:15:48.310267 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ll4nd" podStartSLOduration=9.861442738000001 podStartE2EDuration="11.310247005s" podCreationTimestamp="2025-10-06 22:15:37 +0000 UTC" firstStartedPulling="2025-10-06 
22:15:39.291209244 +0000 UTC m=+2684.584246018" lastFinishedPulling="2025-10-06 22:15:40.740013511 +0000 UTC m=+2686.033050285" observedRunningTime="2025-10-06 22:15:41.349338446 +0000 UTC m=+2686.642375210" watchObservedRunningTime="2025-10-06 22:15:48.310247005 +0000 UTC m=+2693.603283759" Oct 06 22:15:48 crc kubenswrapper[5014]: I1006 22:15:48.477390 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ll4nd" Oct 06 22:15:48 crc kubenswrapper[5014]: I1006 22:15:48.542188 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ll4nd"] Oct 06 22:15:50 crc kubenswrapper[5014]: I1006 22:15:50.421684 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ll4nd" podUID="55d42a51-a88e-467e-ba4f-319c76c880f7" containerName="registry-server" containerID="cri-o://5cc18e7f7553233787ab41aaa034657f73c7b844c3af1c318defc002754cf068" gracePeriod=2 Oct 06 22:15:50 crc kubenswrapper[5014]: E1006 22:15:50.641210 5014 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55d42a51_a88e_467e_ba4f_319c76c880f7.slice/crio-5cc18e7f7553233787ab41aaa034657f73c7b844c3af1c318defc002754cf068.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55d42a51_a88e_467e_ba4f_319c76c880f7.slice/crio-conmon-5cc18e7f7553233787ab41aaa034657f73c7b844c3af1c318defc002754cf068.scope\": RecentStats: unable to find data in memory cache]" Oct 06 22:15:50 crc kubenswrapper[5014]: I1006 22:15:50.859492 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ll4nd" Oct 06 22:15:50 crc kubenswrapper[5014]: I1006 22:15:50.935022 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8q9x\" (UniqueName: \"kubernetes.io/projected/55d42a51-a88e-467e-ba4f-319c76c880f7-kube-api-access-h8q9x\") pod \"55d42a51-a88e-467e-ba4f-319c76c880f7\" (UID: \"55d42a51-a88e-467e-ba4f-319c76c880f7\") " Oct 06 22:15:50 crc kubenswrapper[5014]: I1006 22:15:50.935322 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55d42a51-a88e-467e-ba4f-319c76c880f7-catalog-content\") pod \"55d42a51-a88e-467e-ba4f-319c76c880f7\" (UID: \"55d42a51-a88e-467e-ba4f-319c76c880f7\") " Oct 06 22:15:50 crc kubenswrapper[5014]: I1006 22:15:50.935672 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55d42a51-a88e-467e-ba4f-319c76c880f7-utilities\") pod \"55d42a51-a88e-467e-ba4f-319c76c880f7\" (UID: \"55d42a51-a88e-467e-ba4f-319c76c880f7\") " Oct 06 22:15:50 crc kubenswrapper[5014]: I1006 22:15:50.936740 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55d42a51-a88e-467e-ba4f-319c76c880f7-utilities" (OuterVolumeSpecName: "utilities") pod "55d42a51-a88e-467e-ba4f-319c76c880f7" (UID: "55d42a51-a88e-467e-ba4f-319c76c880f7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:15:50 crc kubenswrapper[5014]: I1006 22:15:50.937681 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55d42a51-a88e-467e-ba4f-319c76c880f7-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 22:15:50 crc kubenswrapper[5014]: I1006 22:15:50.941811 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55d42a51-a88e-467e-ba4f-319c76c880f7-kube-api-access-h8q9x" (OuterVolumeSpecName: "kube-api-access-h8q9x") pod "55d42a51-a88e-467e-ba4f-319c76c880f7" (UID: "55d42a51-a88e-467e-ba4f-319c76c880f7"). InnerVolumeSpecName "kube-api-access-h8q9x". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:15:50 crc kubenswrapper[5014]: I1006 22:15:50.966325 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55d42a51-a88e-467e-ba4f-319c76c880f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "55d42a51-a88e-467e-ba4f-319c76c880f7" (UID: "55d42a51-a88e-467e-ba4f-319c76c880f7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.039886 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8q9x\" (UniqueName: \"kubernetes.io/projected/55d42a51-a88e-467e-ba4f-319c76c880f7-kube-api-access-h8q9x\") on node \"crc\" DevicePath \"\"" Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.039946 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55d42a51-a88e-467e-ba4f-319c76c880f7-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.435904 5014 generic.go:334] "Generic (PLEG): container finished" podID="55d42a51-a88e-467e-ba4f-319c76c880f7" containerID="5cc18e7f7553233787ab41aaa034657f73c7b844c3af1c318defc002754cf068" exitCode=0 Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.435989 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ll4nd" event={"ID":"55d42a51-a88e-467e-ba4f-319c76c880f7","Type":"ContainerDied","Data":"5cc18e7f7553233787ab41aaa034657f73c7b844c3af1c318defc002754cf068"} Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.436052 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ll4nd" event={"ID":"55d42a51-a88e-467e-ba4f-319c76c880f7","Type":"ContainerDied","Data":"5059c477fef57122029e7f0d55b07bd7f88e4bbda6bf5acb507d7bba8a540666"} Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.436094 5014 scope.go:117] "RemoveContainer" containerID="5cc18e7f7553233787ab41aaa034657f73c7b844c3af1c318defc002754cf068" Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.436382 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ll4nd" Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.466332 5014 scope.go:117] "RemoveContainer" containerID="f9f4cff77da37715e56f6cf213ccc1629acd30bb4c39b23c95e754593535d46c" Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.510756 5014 scope.go:117] "RemoveContainer" containerID="f067955f1d7949768245e4f9c492d1e16a6529efdb29162650436a56d98c13f9" Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.514006 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ll4nd"] Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.514073 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ll4nd"] Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.552890 5014 scope.go:117] "RemoveContainer" containerID="5cc18e7f7553233787ab41aaa034657f73c7b844c3af1c318defc002754cf068" Oct 06 22:15:51 crc kubenswrapper[5014]: E1006 22:15:51.553509 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cc18e7f7553233787ab41aaa034657f73c7b844c3af1c318defc002754cf068\": container with ID starting with 5cc18e7f7553233787ab41aaa034657f73c7b844c3af1c318defc002754cf068 not found: ID does not exist" containerID="5cc18e7f7553233787ab41aaa034657f73c7b844c3af1c318defc002754cf068" Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.553575 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cc18e7f7553233787ab41aaa034657f73c7b844c3af1c318defc002754cf068"} err="failed to get container status \"5cc18e7f7553233787ab41aaa034657f73c7b844c3af1c318defc002754cf068\": rpc error: code = NotFound desc = could not find container \"5cc18e7f7553233787ab41aaa034657f73c7b844c3af1c318defc002754cf068\": container with ID starting with 5cc18e7f7553233787ab41aaa034657f73c7b844c3af1c318defc002754cf068 not found: ID does not exist" Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.553643 5014 scope.go:117] "RemoveContainer" containerID="f9f4cff77da37715e56f6cf213ccc1629acd30bb4c39b23c95e754593535d46c" Oct 06 22:15:51 crc kubenswrapper[5014]: E1006 22:15:51.554322 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9f4cff77da37715e56f6cf213ccc1629acd30bb4c39b23c95e754593535d46c\": container with ID starting with f9f4cff77da37715e56f6cf213ccc1629acd30bb4c39b23c95e754593535d46c not found: ID does not exist" containerID="f9f4cff77da37715e56f6cf213ccc1629acd30bb4c39b23c95e754593535d46c" Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.554415 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9f4cff77da37715e56f6cf213ccc1629acd30bb4c39b23c95e754593535d46c"} err="failed to get container status \"f9f4cff77da37715e56f6cf213ccc1629acd30bb4c39b23c95e754593535d46c\": rpc error: code = NotFound desc = could not find container \"f9f4cff77da37715e56f6cf213ccc1629acd30bb4c39b23c95e754593535d46c\": container with ID starting with f9f4cff77da37715e56f6cf213ccc1629acd30bb4c39b23c95e754593535d46c not found: ID does not exist" Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.554943 5014 scope.go:117] "RemoveContainer" containerID="f067955f1d7949768245e4f9c492d1e16a6529efdb29162650436a56d98c13f9" Oct 06 22:15:51 crc kubenswrapper[5014]: E1006 22:15:51.555463 5014 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"f067955f1d7949768245e4f9c492d1e16a6529efdb29162650436a56d98c13f9\": container with ID starting with f067955f1d7949768245e4f9c492d1e16a6529efdb29162650436a56d98c13f9 not found: ID does not exist" containerID="f067955f1d7949768245e4f9c492d1e16a6529efdb29162650436a56d98c13f9" Oct 06 22:15:51 crc kubenswrapper[5014]: I1006 22:15:51.555500 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f067955f1d7949768245e4f9c492d1e16a6529efdb29162650436a56d98c13f9"} err="failed to get container status \"f067955f1d7949768245e4f9c492d1e16a6529efdb29162650436a56d98c13f9\": rpc error: code = NotFound desc = could not find container \"f067955f1d7949768245e4f9c492d1e16a6529efdb29162650436a56d98c13f9\": container with ID starting with f067955f1d7949768245e4f9c492d1e16a6529efdb29162650436a56d98c13f9 not found: ID does not exist" Oct 06 22:15:53 crc kubenswrapper[5014]: I1006 22:15:53.506537 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55d42a51-a88e-467e-ba4f-319c76c880f7" path="/var/lib/kubelet/pods/55d42a51-a88e-467e-ba4f-319c76c880f7/volumes" Oct 06 22:16:01 crc kubenswrapper[5014]: I1006 22:16:01.717028 5014 scope.go:117] "RemoveContainer" containerID="9a7321f375cc06c6f04b42cc9c645e450c3b940eb3b374323d4bb7feb8985a37" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.029747 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bxgch"] Oct 06 22:16:28 crc kubenswrapper[5014]: E1006 22:16:28.030795 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55d42a51-a88e-467e-ba4f-319c76c880f7" containerName="extract-utilities" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.030812 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="55d42a51-a88e-467e-ba4f-319c76c880f7" containerName="extract-utilities" Oct 06 22:16:28 crc kubenswrapper[5014]: E1006 22:16:28.030850 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55d42a51-a88e-467e-ba4f-319c76c880f7" containerName="extract-content" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.030859 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="55d42a51-a88e-467e-ba4f-319c76c880f7" containerName="extract-content" Oct 06 22:16:28 crc kubenswrapper[5014]: E1006 22:16:28.030880 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55d42a51-a88e-467e-ba4f-319c76c880f7" containerName="registry-server" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.030892 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="55d42a51-a88e-467e-ba4f-319c76c880f7" containerName="registry-server" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.031089 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="55d42a51-a88e-467e-ba4f-319c76c880f7" containerName="registry-server" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.032509 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.038141 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bxgch"] Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.230853 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8d8p\" (UniqueName: \"kubernetes.io/projected/6158195f-16b8-4a2d-8c85-01aacb43da41-kube-api-access-x8d8p\") pod \"certified-operators-bxgch\" (UID: \"6158195f-16b8-4a2d-8c85-01aacb43da41\") " pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.230921 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6158195f-16b8-4a2d-8c85-01aacb43da41-utilities\") pod \"certified-operators-bxgch\" (UID: \"6158195f-16b8-4a2d-8c85-01aacb43da41\") " pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.230984 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6158195f-16b8-4a2d-8c85-01aacb43da41-catalog-content\") pod \"certified-operators-bxgch\" (UID: \"6158195f-16b8-4a2d-8c85-01aacb43da41\") " pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.332809 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6158195f-16b8-4a2d-8c85-01aacb43da41-catalog-content\") pod \"certified-operators-bxgch\" (UID: \"6158195f-16b8-4a2d-8c85-01aacb43da41\") " pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.332992 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8d8p\" (UniqueName: \"kubernetes.io/projected/6158195f-16b8-4a2d-8c85-01aacb43da41-kube-api-access-x8d8p\") pod \"certified-operators-bxgch\" (UID: \"6158195f-16b8-4a2d-8c85-01aacb43da41\") " pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.333056 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6158195f-16b8-4a2d-8c85-01aacb43da41-utilities\") pod \"certified-operators-bxgch\" (UID: \"6158195f-16b8-4a2d-8c85-01aacb43da41\") " pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.333452 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6158195f-16b8-4a2d-8c85-01aacb43da41-catalog-content\") pod \"certified-operators-bxgch\" (UID: \"6158195f-16b8-4a2d-8c85-01aacb43da41\") " pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.333485 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6158195f-16b8-4a2d-8c85-01aacb43da41-utilities\") pod \"certified-operators-bxgch\" (UID: \"6158195f-16b8-4a2d-8c85-01aacb43da41\") " pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.353449 5014 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-x8d8p\" (UniqueName: \"kubernetes.io/projected/6158195f-16b8-4a2d-8c85-01aacb43da41-kube-api-access-x8d8p\") pod \"certified-operators-bxgch\" (UID: \"6158195f-16b8-4a2d-8c85-01aacb43da41\") " pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.359212 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:28 crc kubenswrapper[5014]: I1006 22:16:28.830281 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bxgch"] Oct 06 22:16:29 crc kubenswrapper[5014]: I1006 22:16:29.797019 5014 generic.go:334] "Generic (PLEG): container finished" podID="6158195f-16b8-4a2d-8c85-01aacb43da41" containerID="86cafe713c11838a3365e13de270dfe7535fddaad6fe1cb52e0d6deae522a05c" exitCode=0 Oct 06 22:16:29 crc kubenswrapper[5014]: I1006 22:16:29.797067 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bxgch" event={"ID":"6158195f-16b8-4a2d-8c85-01aacb43da41","Type":"ContainerDied","Data":"86cafe713c11838a3365e13de270dfe7535fddaad6fe1cb52e0d6deae522a05c"} Oct 06 22:16:29 crc kubenswrapper[5014]: I1006 22:16:29.797115 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bxgch" event={"ID":"6158195f-16b8-4a2d-8c85-01aacb43da41","Type":"ContainerStarted","Data":"b96b52e8fc1c692dd6833af3edb40c9f8bb59e5381978c54295368d2720210c7"} Oct 06 22:16:30 crc kubenswrapper[5014]: I1006 22:16:30.804044 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bxgch" event={"ID":"6158195f-16b8-4a2d-8c85-01aacb43da41","Type":"ContainerStarted","Data":"c109b3ee633f5b0e14cb7308dedc7b8fccf506f00599afbcd95c065b1e0b3213"} Oct 06 22:16:31 crc kubenswrapper[5014]: I1006 22:16:31.816902 5014 generic.go:334] "Generic (PLEG): container finished" podID="6158195f-16b8-4a2d-8c85-01aacb43da41" containerID="c109b3ee633f5b0e14cb7308dedc7b8fccf506f00599afbcd95c065b1e0b3213" exitCode=0 Oct 06 22:16:31 crc kubenswrapper[5014]: I1006 22:16:31.816972 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bxgch" event={"ID":"6158195f-16b8-4a2d-8c85-01aacb43da41","Type":"ContainerDied","Data":"c109b3ee633f5b0e14cb7308dedc7b8fccf506f00599afbcd95c065b1e0b3213"} Oct 06 22:16:32 crc kubenswrapper[5014]: I1006 22:16:32.827311 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bxgch" event={"ID":"6158195f-16b8-4a2d-8c85-01aacb43da41","Type":"ContainerStarted","Data":"fdc34b94ae3a831e9f72da48e0adfe20e02dcb8da88e4bbbf4cd3023b23e4136"} Oct 06 22:16:32 crc kubenswrapper[5014]: I1006 22:16:32.854330 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bxgch" podStartSLOduration=2.294701056 podStartE2EDuration="4.854305903s" podCreationTimestamp="2025-10-06 22:16:28 +0000 UTC" firstStartedPulling="2025-10-06 22:16:29.800112257 +0000 UTC m=+2735.093148991" lastFinishedPulling="2025-10-06 22:16:32.359717064 +0000 UTC m=+2737.652753838" observedRunningTime="2025-10-06 22:16:32.846482288 +0000 UTC m=+2738.139519052" watchObservedRunningTime="2025-10-06 22:16:32.854305903 +0000 UTC m=+2738.147342677" Oct 06 22:16:38 crc kubenswrapper[5014]: I1006 22:16:38.359738 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:38 crc kubenswrapper[5014]: I1006 22:16:38.360175 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:38 crc kubenswrapper[5014]: I1006 22:16:38.456998 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:38 crc kubenswrapper[5014]: I1006 22:16:38.960606 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:39 crc kubenswrapper[5014]: I1006 22:16:39.031476 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bxgch"] Oct 06 22:16:40 crc kubenswrapper[5014]: I1006 22:16:40.902710 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bxgch" podUID="6158195f-16b8-4a2d-8c85-01aacb43da41" containerName="registry-server" containerID="cri-o://fdc34b94ae3a831e9f72da48e0adfe20e02dcb8da88e4bbbf4cd3023b23e4136" gracePeriod=2 Oct 06 22:16:41 crc kubenswrapper[5014]: I1006 22:16:41.915582 5014 generic.go:334] "Generic (PLEG): container finished" podID="6158195f-16b8-4a2d-8c85-01aacb43da41" containerID="fdc34b94ae3a831e9f72da48e0adfe20e02dcb8da88e4bbbf4cd3023b23e4136" exitCode=0 Oct 06 22:16:41 crc kubenswrapper[5014]: I1006 22:16:41.915684 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bxgch" event={"ID":"6158195f-16b8-4a2d-8c85-01aacb43da41","Type":"ContainerDied","Data":"fdc34b94ae3a831e9f72da48e0adfe20e02dcb8da88e4bbbf4cd3023b23e4136"} Oct 06 22:16:41 crc kubenswrapper[5014]: I1006 22:16:41.915988 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bxgch" event={"ID":"6158195f-16b8-4a2d-8c85-01aacb43da41","Type":"ContainerDied","Data":"b96b52e8fc1c692dd6833af3edb40c9f8bb59e5381978c54295368d2720210c7"} Oct 06 22:16:41 crc kubenswrapper[5014]: I1006 22:16:41.916002 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b96b52e8fc1c692dd6833af3edb40c9f8bb59e5381978c54295368d2720210c7" Oct 06 22:16:41 crc kubenswrapper[5014]: I1006 22:16:41.942123 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:42 crc kubenswrapper[5014]: I1006 22:16:42.046117 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6158195f-16b8-4a2d-8c85-01aacb43da41-catalog-content\") pod \"6158195f-16b8-4a2d-8c85-01aacb43da41\" (UID: \"6158195f-16b8-4a2d-8c85-01aacb43da41\") " Oct 06 22:16:42 crc kubenswrapper[5014]: I1006 22:16:42.046200 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6158195f-16b8-4a2d-8c85-01aacb43da41-utilities\") pod \"6158195f-16b8-4a2d-8c85-01aacb43da41\" (UID: \"6158195f-16b8-4a2d-8c85-01aacb43da41\") " Oct 06 22:16:42 crc kubenswrapper[5014]: I1006 22:16:42.046251 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8d8p\" (UniqueName: \"kubernetes.io/projected/6158195f-16b8-4a2d-8c85-01aacb43da41-kube-api-access-x8d8p\") pod \"6158195f-16b8-4a2d-8c85-01aacb43da41\" (UID: \"6158195f-16b8-4a2d-8c85-01aacb43da41\") " Oct 06 22:16:42 crc kubenswrapper[5014]: I1006 22:16:42.047226 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6158195f-16b8-4a2d-8c85-01aacb43da41-utilities" (OuterVolumeSpecName: "utilities") pod "6158195f-16b8-4a2d-8c85-01aacb43da41" (UID: "6158195f-16b8-4a2d-8c85-01aacb43da41"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:16:42 crc kubenswrapper[5014]: I1006 22:16:42.055569 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6158195f-16b8-4a2d-8c85-01aacb43da41-kube-api-access-x8d8p" (OuterVolumeSpecName: "kube-api-access-x8d8p") pod "6158195f-16b8-4a2d-8c85-01aacb43da41" (UID: "6158195f-16b8-4a2d-8c85-01aacb43da41"). InnerVolumeSpecName "kube-api-access-x8d8p". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:16:42 crc kubenswrapper[5014]: I1006 22:16:42.118738 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6158195f-16b8-4a2d-8c85-01aacb43da41-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6158195f-16b8-4a2d-8c85-01aacb43da41" (UID: "6158195f-16b8-4a2d-8c85-01aacb43da41"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:16:42 crc kubenswrapper[5014]: I1006 22:16:42.147803 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6158195f-16b8-4a2d-8c85-01aacb43da41-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 22:16:42 crc kubenswrapper[5014]: I1006 22:16:42.147838 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6158195f-16b8-4a2d-8c85-01aacb43da41-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 22:16:42 crc kubenswrapper[5014]: I1006 22:16:42.147854 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8d8p\" (UniqueName: \"kubernetes.io/projected/6158195f-16b8-4a2d-8c85-01aacb43da41-kube-api-access-x8d8p\") on node \"crc\" DevicePath \"\"" Oct 06 22:16:42 crc kubenswrapper[5014]: I1006 22:16:42.926016 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bxgch" Oct 06 22:16:42 crc kubenswrapper[5014]: I1006 22:16:42.982440 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bxgch"] Oct 06 22:16:42 crc kubenswrapper[5014]: I1006 22:16:42.991538 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bxgch"] Oct 06 22:16:43 crc kubenswrapper[5014]: I1006 22:16:43.502607 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6158195f-16b8-4a2d-8c85-01aacb43da41" path="/var/lib/kubelet/pods/6158195f-16b8-4a2d-8c85-01aacb43da41/volumes" Oct 06 22:17:51 crc kubenswrapper[5014]: I1006 22:17:51.735358 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:17:51 crc kubenswrapper[5014]: I1006 22:17:51.735926 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:18:21 crc kubenswrapper[5014]: I1006 22:18:21.736406 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:18:21 crc kubenswrapper[5014]: I1006 22:18:21.737273 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:18:51 crc kubenswrapper[5014]: I1006 22:18:51.735234 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:18:51 crc kubenswrapper[5014]: I1006 22:18:51.736878 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:18:51 crc kubenswrapper[5014]: I1006 22:18:51.736997 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 22:18:51 crc kubenswrapper[5014]: I1006 22:18:51.738201 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"dcc0d26c271b15739956d985f7ad3120a8d4e8910b238b26753fb5936d6e11b2"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be 
restarted" Oct 06 22:18:51 crc kubenswrapper[5014]: I1006 22:18:51.738342 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://dcc0d26c271b15739956d985f7ad3120a8d4e8910b238b26753fb5936d6e11b2" gracePeriod=600 Oct 06 22:18:52 crc kubenswrapper[5014]: I1006 22:18:52.245968 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="dcc0d26c271b15739956d985f7ad3120a8d4e8910b238b26753fb5936d6e11b2" exitCode=0 Oct 06 22:18:52 crc kubenswrapper[5014]: I1006 22:18:52.246068 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"dcc0d26c271b15739956d985f7ad3120a8d4e8910b238b26753fb5936d6e11b2"} Oct 06 22:18:52 crc kubenswrapper[5014]: I1006 22:18:52.246352 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"} Oct 06 22:18:52 crc kubenswrapper[5014]: I1006 22:18:52.246385 5014 scope.go:117] "RemoveContainer" containerID="595839fc646663bf033098a57a44b611d1fd259f4c6f8d8eace24fe4561f1ce6" Oct 06 22:20:38 crc kubenswrapper[5014]: I1006 22:20:38.848670 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ng597"] Oct 06 22:20:38 crc kubenswrapper[5014]: E1006 22:20:38.849787 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6158195f-16b8-4a2d-8c85-01aacb43da41" containerName="registry-server" Oct 06 22:20:38 crc kubenswrapper[5014]: I1006 22:20:38.849810 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="6158195f-16b8-4a2d-8c85-01aacb43da41" containerName="registry-server" Oct 06 22:20:38 crc kubenswrapper[5014]: E1006 22:20:38.849830 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6158195f-16b8-4a2d-8c85-01aacb43da41" containerName="extract-content" Oct 06 22:20:38 crc kubenswrapper[5014]: I1006 22:20:38.849842 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="6158195f-16b8-4a2d-8c85-01aacb43da41" containerName="extract-content" Oct 06 22:20:38 crc kubenswrapper[5014]: E1006 22:20:38.849879 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6158195f-16b8-4a2d-8c85-01aacb43da41" containerName="extract-utilities" Oct 06 22:20:38 crc kubenswrapper[5014]: I1006 22:20:38.849891 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="6158195f-16b8-4a2d-8c85-01aacb43da41" containerName="extract-utilities" Oct 06 22:20:38 crc kubenswrapper[5014]: I1006 22:20:38.850096 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="6158195f-16b8-4a2d-8c85-01aacb43da41" containerName="registry-server" Oct 06 22:20:38 crc kubenswrapper[5014]: I1006 22:20:38.851640 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:38 crc kubenswrapper[5014]: I1006 22:20:38.866865 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ng597"] Oct 06 22:20:38 crc kubenswrapper[5014]: I1006 22:20:38.872685 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-catalog-content\") pod \"community-operators-ng597\" (UID: \"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80\") " pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:38 crc kubenswrapper[5014]: I1006 22:20:38.872826 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnf26\" (UniqueName: \"kubernetes.io/projected/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-kube-api-access-vnf26\") pod \"community-operators-ng597\" (UID: \"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80\") " pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:38 crc kubenswrapper[5014]: I1006 22:20:38.873000 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-utilities\") pod \"community-operators-ng597\" (UID: \"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80\") " pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:38 crc kubenswrapper[5014]: I1006 22:20:38.974850 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-catalog-content\") pod \"community-operators-ng597\" (UID: \"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80\") " pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:38 crc kubenswrapper[5014]: I1006 22:20:38.974962 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnf26\" (UniqueName: \"kubernetes.io/projected/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-kube-api-access-vnf26\") pod \"community-operators-ng597\" (UID: \"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80\") " pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:38 crc kubenswrapper[5014]: I1006 22:20:38.975006 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-utilities\") pod \"community-operators-ng597\" (UID: \"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80\") " pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:38 crc kubenswrapper[5014]: I1006 22:20:38.975520 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-catalog-content\") pod \"community-operators-ng597\" (UID: \"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80\") " pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:38 crc kubenswrapper[5014]: I1006 22:20:38.975561 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-utilities\") pod \"community-operators-ng597\" (UID: \"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80\") " pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:39 crc kubenswrapper[5014]: I1006 22:20:39.002681 5014 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-vnf26\" (UniqueName: \"kubernetes.io/projected/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-kube-api-access-vnf26\") pod \"community-operators-ng597\" (UID: \"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80\") " pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:39 crc kubenswrapper[5014]: I1006 22:20:39.182094 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:39 crc kubenswrapper[5014]: I1006 22:20:39.668480 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ng597"] Oct 06 22:20:40 crc kubenswrapper[5014]: I1006 22:20:40.324759 5014 generic.go:334] "Generic (PLEG): container finished" podID="e454ee45-0feb-43ae-9c4e-7e93b8a3ef80" containerID="a0c1504d05ab7bde0e3562c98d66e164fb9fc8aca0910406af388ee9b9471d56" exitCode=0 Oct 06 22:20:40 crc kubenswrapper[5014]: I1006 22:20:40.324819 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ng597" event={"ID":"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80","Type":"ContainerDied","Data":"a0c1504d05ab7bde0e3562c98d66e164fb9fc8aca0910406af388ee9b9471d56"} Oct 06 22:20:40 crc kubenswrapper[5014]: I1006 22:20:40.324858 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ng597" event={"ID":"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80","Type":"ContainerStarted","Data":"7013ff5da9662af4454fb1aeb2d8fc74faeb6c9a33708bfd84035827f05d8cf3"} Oct 06 22:20:40 crc kubenswrapper[5014]: I1006 22:20:40.330466 5014 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 06 22:20:41 crc kubenswrapper[5014]: I1006 22:20:41.339013 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ng597" event={"ID":"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80","Type":"ContainerStarted","Data":"a7bd860b01bf52398d8c1ca1d6ead9b225e2fa152b6390673e5adc8fb5478d65"} Oct 06 22:20:42 crc kubenswrapper[5014]: I1006 22:20:42.351744 5014 generic.go:334] "Generic (PLEG): container finished" podID="e454ee45-0feb-43ae-9c4e-7e93b8a3ef80" containerID="a7bd860b01bf52398d8c1ca1d6ead9b225e2fa152b6390673e5adc8fb5478d65" exitCode=0 Oct 06 22:20:42 crc kubenswrapper[5014]: I1006 22:20:42.351820 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ng597" event={"ID":"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80","Type":"ContainerDied","Data":"a7bd860b01bf52398d8c1ca1d6ead9b225e2fa152b6390673e5adc8fb5478d65"} Oct 06 22:20:43 crc kubenswrapper[5014]: I1006 22:20:43.364880 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ng597" event={"ID":"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80","Type":"ContainerStarted","Data":"e11a68d292aca4580443b8bf9fb16826911472fbef500648e9edf270cd536961"} Oct 06 22:20:43 crc kubenswrapper[5014]: I1006 22:20:43.396589 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ng597" podStartSLOduration=2.759685746 podStartE2EDuration="5.396563042s" podCreationTimestamp="2025-10-06 22:20:38 +0000 UTC" firstStartedPulling="2025-10-06 22:20:40.329871987 +0000 UTC m=+2985.622908761" lastFinishedPulling="2025-10-06 22:20:42.966749283 +0000 UTC m=+2988.259786057" observedRunningTime="2025-10-06 22:20:43.392742043 +0000 UTC m=+2988.685778817" watchObservedRunningTime="2025-10-06 
22:20:43.396563042 +0000 UTC m=+2988.689599826" Oct 06 22:20:44 crc kubenswrapper[5014]: I1006 22:20:44.412196 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gnn25"] Oct 06 22:20:44 crc kubenswrapper[5014]: I1006 22:20:44.415263 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gnn25" Oct 06 22:20:44 crc kubenswrapper[5014]: I1006 22:20:44.428291 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gnn25"] Oct 06 22:20:44 crc kubenswrapper[5014]: I1006 22:20:44.461237 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ddb9x\" (UniqueName: \"kubernetes.io/projected/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-kube-api-access-ddb9x\") pod \"redhat-operators-gnn25\" (UID: \"6755ca71-09d7-4ff9-b3dd-fef8a8f87738\") " pod="openshift-marketplace/redhat-operators-gnn25" Oct 06 22:20:44 crc kubenswrapper[5014]: I1006 22:20:44.461291 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-utilities\") pod \"redhat-operators-gnn25\" (UID: \"6755ca71-09d7-4ff9-b3dd-fef8a8f87738\") " pod="openshift-marketplace/redhat-operators-gnn25" Oct 06 22:20:44 crc kubenswrapper[5014]: I1006 22:20:44.461443 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-catalog-content\") pod \"redhat-operators-gnn25\" (UID: \"6755ca71-09d7-4ff9-b3dd-fef8a8f87738\") " pod="openshift-marketplace/redhat-operators-gnn25" Oct 06 22:20:44 crc kubenswrapper[5014]: I1006 22:20:44.564170 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-catalog-content\") pod \"redhat-operators-gnn25\" (UID: \"6755ca71-09d7-4ff9-b3dd-fef8a8f87738\") " pod="openshift-marketplace/redhat-operators-gnn25" Oct 06 22:20:44 crc kubenswrapper[5014]: I1006 22:20:44.564294 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ddb9x\" (UniqueName: \"kubernetes.io/projected/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-kube-api-access-ddb9x\") pod \"redhat-operators-gnn25\" (UID: \"6755ca71-09d7-4ff9-b3dd-fef8a8f87738\") " pod="openshift-marketplace/redhat-operators-gnn25" Oct 06 22:20:44 crc kubenswrapper[5014]: I1006 22:20:44.564343 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-utilities\") pod \"redhat-operators-gnn25\" (UID: \"6755ca71-09d7-4ff9-b3dd-fef8a8f87738\") " pod="openshift-marketplace/redhat-operators-gnn25" Oct 06 22:20:44 crc kubenswrapper[5014]: I1006 22:20:44.564778 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-catalog-content\") pod \"redhat-operators-gnn25\" (UID: \"6755ca71-09d7-4ff9-b3dd-fef8a8f87738\") " pod="openshift-marketplace/redhat-operators-gnn25" Oct 06 22:20:44 crc kubenswrapper[5014]: I1006 22:20:44.564997 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-utilities\") pod \"redhat-operators-gnn25\" (UID: \"6755ca71-09d7-4ff9-b3dd-fef8a8f87738\") " pod="openshift-marketplace/redhat-operators-gnn25" Oct 06 22:20:44 crc kubenswrapper[5014]: I1006 22:20:44.601900 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ddb9x\" (UniqueName: \"kubernetes.io/projected/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-kube-api-access-ddb9x\") pod \"redhat-operators-gnn25\" (UID: \"6755ca71-09d7-4ff9-b3dd-fef8a8f87738\") " pod="openshift-marketplace/redhat-operators-gnn25" Oct 06 22:20:44 crc kubenswrapper[5014]: I1006 22:20:44.754205 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gnn25" Oct 06 22:20:44 crc kubenswrapper[5014]: I1006 22:20:44.992314 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gnn25"] Oct 06 22:20:45 crc kubenswrapper[5014]: I1006 22:20:45.380880 5014 generic.go:334] "Generic (PLEG): container finished" podID="6755ca71-09d7-4ff9-b3dd-fef8a8f87738" containerID="a234da4874d82f5a9f5f25fcfad721373c69b694982509ad2633914d5ca1dbaa" exitCode=0 Oct 06 22:20:45 crc kubenswrapper[5014]: I1006 22:20:45.380923 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gnn25" event={"ID":"6755ca71-09d7-4ff9-b3dd-fef8a8f87738","Type":"ContainerDied","Data":"a234da4874d82f5a9f5f25fcfad721373c69b694982509ad2633914d5ca1dbaa"} Oct 06 22:20:45 crc kubenswrapper[5014]: I1006 22:20:45.380948 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gnn25" event={"ID":"6755ca71-09d7-4ff9-b3dd-fef8a8f87738","Type":"ContainerStarted","Data":"c759fa6256002a5156d2f40c61079833e0bb648d843293cb619d093966cc107c"} Oct 06 22:20:46 crc kubenswrapper[5014]: I1006 22:20:46.399603 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gnn25" event={"ID":"6755ca71-09d7-4ff9-b3dd-fef8a8f87738","Type":"ContainerStarted","Data":"dc75585fac820a190c14bedc98a3db59cff4f22fc9ffda8712f193de4948ac3e"} Oct 06 22:20:47 crc kubenswrapper[5014]: I1006 22:20:47.409998 5014 generic.go:334] "Generic (PLEG): container finished" podID="6755ca71-09d7-4ff9-b3dd-fef8a8f87738" containerID="dc75585fac820a190c14bedc98a3db59cff4f22fc9ffda8712f193de4948ac3e" exitCode=0 Oct 06 22:20:47 crc kubenswrapper[5014]: I1006 22:20:47.410097 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gnn25" event={"ID":"6755ca71-09d7-4ff9-b3dd-fef8a8f87738","Type":"ContainerDied","Data":"dc75585fac820a190c14bedc98a3db59cff4f22fc9ffda8712f193de4948ac3e"} Oct 06 22:20:48 crc kubenswrapper[5014]: I1006 22:20:48.426889 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gnn25" event={"ID":"6755ca71-09d7-4ff9-b3dd-fef8a8f87738","Type":"ContainerStarted","Data":"3dd69711c71294e1e95f3b59e0bf6491b14f1f38cbac93b9c456bac218f10dbf"} Oct 06 22:20:48 crc kubenswrapper[5014]: I1006 22:20:48.458709 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gnn25" podStartSLOduration=2.068078538 podStartE2EDuration="4.458685956s" podCreationTimestamp="2025-10-06 22:20:44 +0000 UTC" firstStartedPulling="2025-10-06 22:20:45.383109864 +0000 UTC m=+2990.676146598" lastFinishedPulling="2025-10-06 22:20:47.773717272 +0000 UTC m=+2993.066754016" 
observedRunningTime="2025-10-06 22:20:48.452484392 +0000 UTC m=+2993.745521156" watchObservedRunningTime="2025-10-06 22:20:48.458685956 +0000 UTC m=+2993.751722730" Oct 06 22:20:49 crc kubenswrapper[5014]: I1006 22:20:49.182363 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:49 crc kubenswrapper[5014]: I1006 22:20:49.182410 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:49 crc kubenswrapper[5014]: I1006 22:20:49.252199 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:49 crc kubenswrapper[5014]: I1006 22:20:49.514312 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:51 crc kubenswrapper[5014]: I1006 22:20:51.402422 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ng597"] Oct 06 22:20:51 crc kubenswrapper[5014]: I1006 22:20:51.477964 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ng597" podUID="e454ee45-0feb-43ae-9c4e-7e93b8a3ef80" containerName="registry-server" containerID="cri-o://e11a68d292aca4580443b8bf9fb16826911472fbef500648e9edf270cd536961" gracePeriod=2 Oct 06 22:20:51 crc kubenswrapper[5014]: I1006 22:20:51.979340 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.030090 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vnf26\" (UniqueName: \"kubernetes.io/projected/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-kube-api-access-vnf26\") pod \"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80\" (UID: \"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80\") " Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.030173 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-catalog-content\") pod \"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80\" (UID: \"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80\") " Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.030238 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-utilities\") pod \"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80\" (UID: \"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80\") " Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.031960 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-utilities" (OuterVolumeSpecName: "utilities") pod "e454ee45-0feb-43ae-9c4e-7e93b8a3ef80" (UID: "e454ee45-0feb-43ae-9c4e-7e93b8a3ef80"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.041832 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-kube-api-access-vnf26" (OuterVolumeSpecName: "kube-api-access-vnf26") pod "e454ee45-0feb-43ae-9c4e-7e93b8a3ef80" (UID: "e454ee45-0feb-43ae-9c4e-7e93b8a3ef80"). 
InnerVolumeSpecName "kube-api-access-vnf26". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.131738 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vnf26\" (UniqueName: \"kubernetes.io/projected/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-kube-api-access-vnf26\") on node \"crc\" DevicePath \"\"" Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.132005 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.490174 5014 generic.go:334] "Generic (PLEG): container finished" podID="e454ee45-0feb-43ae-9c4e-7e93b8a3ef80" containerID="e11a68d292aca4580443b8bf9fb16826911472fbef500648e9edf270cd536961" exitCode=0 Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.490217 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ng597" event={"ID":"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80","Type":"ContainerDied","Data":"e11a68d292aca4580443b8bf9fb16826911472fbef500648e9edf270cd536961"} Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.490246 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ng597" event={"ID":"e454ee45-0feb-43ae-9c4e-7e93b8a3ef80","Type":"ContainerDied","Data":"7013ff5da9662af4454fb1aeb2d8fc74faeb6c9a33708bfd84035827f05d8cf3"} Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.490267 5014 scope.go:117] "RemoveContainer" containerID="e11a68d292aca4580443b8bf9fb16826911472fbef500648e9edf270cd536961" Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.490322 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ng597" Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.532191 5014 scope.go:117] "RemoveContainer" containerID="a7bd860b01bf52398d8c1ca1d6ead9b225e2fa152b6390673e5adc8fb5478d65" Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.575298 5014 scope.go:117] "RemoveContainer" containerID="a0c1504d05ab7bde0e3562c98d66e164fb9fc8aca0910406af388ee9b9471d56" Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.604421 5014 scope.go:117] "RemoveContainer" containerID="e11a68d292aca4580443b8bf9fb16826911472fbef500648e9edf270cd536961" Oct 06 22:20:52 crc kubenswrapper[5014]: E1006 22:20:52.605215 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e11a68d292aca4580443b8bf9fb16826911472fbef500648e9edf270cd536961\": container with ID starting with e11a68d292aca4580443b8bf9fb16826911472fbef500648e9edf270cd536961 not found: ID does not exist" containerID="e11a68d292aca4580443b8bf9fb16826911472fbef500648e9edf270cd536961" Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.605305 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e11a68d292aca4580443b8bf9fb16826911472fbef500648e9edf270cd536961"} err="failed to get container status \"e11a68d292aca4580443b8bf9fb16826911472fbef500648e9edf270cd536961\": rpc error: code = NotFound desc = could not find container \"e11a68d292aca4580443b8bf9fb16826911472fbef500648e9edf270cd536961\": container with ID starting with e11a68d292aca4580443b8bf9fb16826911472fbef500648e9edf270cd536961 not found: ID does not exist" Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.605353 5014 scope.go:117] "RemoveContainer" containerID="a7bd860b01bf52398d8c1ca1d6ead9b225e2fa152b6390673e5adc8fb5478d65" Oct 06 22:20:52 crc kubenswrapper[5014]: E1006 22:20:52.606094 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7bd860b01bf52398d8c1ca1d6ead9b225e2fa152b6390673e5adc8fb5478d65\": container with ID starting with a7bd860b01bf52398d8c1ca1d6ead9b225e2fa152b6390673e5adc8fb5478d65 not found: ID does not exist" containerID="a7bd860b01bf52398d8c1ca1d6ead9b225e2fa152b6390673e5adc8fb5478d65" Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.606175 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7bd860b01bf52398d8c1ca1d6ead9b225e2fa152b6390673e5adc8fb5478d65"} err="failed to get container status \"a7bd860b01bf52398d8c1ca1d6ead9b225e2fa152b6390673e5adc8fb5478d65\": rpc error: code = NotFound desc = could not find container \"a7bd860b01bf52398d8c1ca1d6ead9b225e2fa152b6390673e5adc8fb5478d65\": container with ID starting with a7bd860b01bf52398d8c1ca1d6ead9b225e2fa152b6390673e5adc8fb5478d65 not found: ID does not exist" Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.606229 5014 scope.go:117] "RemoveContainer" containerID="a0c1504d05ab7bde0e3562c98d66e164fb9fc8aca0910406af388ee9b9471d56" Oct 06 22:20:52 crc kubenswrapper[5014]: E1006 22:20:52.606756 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0c1504d05ab7bde0e3562c98d66e164fb9fc8aca0910406af388ee9b9471d56\": container with ID starting with a0c1504d05ab7bde0e3562c98d66e164fb9fc8aca0910406af388ee9b9471d56 not found: ID does not exist" containerID="a0c1504d05ab7bde0e3562c98d66e164fb9fc8aca0910406af388ee9b9471d56" 
Oct 06 22:20:52 crc kubenswrapper[5014]: I1006 22:20:52.606829 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0c1504d05ab7bde0e3562c98d66e164fb9fc8aca0910406af388ee9b9471d56"} err="failed to get container status \"a0c1504d05ab7bde0e3562c98d66e164fb9fc8aca0910406af388ee9b9471d56\": rpc error: code = NotFound desc = could not find container \"a0c1504d05ab7bde0e3562c98d66e164fb9fc8aca0910406af388ee9b9471d56\": container with ID starting with a0c1504d05ab7bde0e3562c98d66e164fb9fc8aca0910406af388ee9b9471d56 not found: ID does not exist"
Oct 06 22:20:53 crc kubenswrapper[5014]: I1006 22:20:53.439019 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e454ee45-0feb-43ae-9c4e-7e93b8a3ef80" (UID: "e454ee45-0feb-43ae-9c4e-7e93b8a3ef80"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 22:20:53 crc kubenswrapper[5014]: I1006 22:20:53.457658 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 22:20:53 crc kubenswrapper[5014]: I1006 22:20:53.719830 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ng597"]
Oct 06 22:20:53 crc kubenswrapper[5014]: I1006 22:20:53.728246 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ng597"]
Oct 06 22:20:54 crc kubenswrapper[5014]: I1006 22:20:54.754887 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gnn25"
Oct 06 22:20:54 crc kubenswrapper[5014]: I1006 22:20:54.754975 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gnn25"
Oct 06 22:20:54 crc kubenswrapper[5014]: I1006 22:20:54.837186 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gnn25"
Oct 06 22:20:55 crc kubenswrapper[5014]: I1006 22:20:55.524972 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e454ee45-0feb-43ae-9c4e-7e93b8a3ef80" path="/var/lib/kubelet/pods/e454ee45-0feb-43ae-9c4e-7e93b8a3ef80/volumes"
Oct 06 22:20:55 crc kubenswrapper[5014]: I1006 22:20:55.581119 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gnn25"
Oct 06 22:20:56 crc kubenswrapper[5014]: I1006 22:20:56.599352 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gnn25"]
Oct 06 22:20:57 crc kubenswrapper[5014]: I1006 22:20:57.568929 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gnn25" podUID="6755ca71-09d7-4ff9-b3dd-fef8a8f87738" containerName="registry-server" containerID="cri-o://3dd69711c71294e1e95f3b59e0bf6491b14f1f38cbac93b9c456bac218f10dbf" gracePeriod=2
Oct 06 22:20:57 crc kubenswrapper[5014]: E1006 22:20:57.786913 5014 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6755ca71_09d7_4ff9_b3dd_fef8a8f87738.slice/crio-conmon-3dd69711c71294e1e95f3b59e0bf6491b14f1f38cbac93b9c456bac218f10dbf.scope\": RecentStats: unable to find data in memory cache]"
Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.063528 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gnn25"
Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.170145 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ddb9x\" (UniqueName: \"kubernetes.io/projected/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-kube-api-access-ddb9x\") pod \"6755ca71-09d7-4ff9-b3dd-fef8a8f87738\" (UID: \"6755ca71-09d7-4ff9-b3dd-fef8a8f87738\") "
Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.170265 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-utilities\") pod \"6755ca71-09d7-4ff9-b3dd-fef8a8f87738\" (UID: \"6755ca71-09d7-4ff9-b3dd-fef8a8f87738\") "
Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.170379 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-catalog-content\") pod \"6755ca71-09d7-4ff9-b3dd-fef8a8f87738\" (UID: \"6755ca71-09d7-4ff9-b3dd-fef8a8f87738\") "
Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.172113 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-utilities" (OuterVolumeSpecName: "utilities") pod "6755ca71-09d7-4ff9-b3dd-fef8a8f87738" (UID: "6755ca71-09d7-4ff9-b3dd-fef8a8f87738"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.181049 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-kube-api-access-ddb9x" (OuterVolumeSpecName: "kube-api-access-ddb9x") pod "6755ca71-09d7-4ff9-b3dd-fef8a8f87738" (UID: "6755ca71-09d7-4ff9-b3dd-fef8a8f87738"). InnerVolumeSpecName "kube-api-access-ddb9x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.272588 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ddb9x\" (UniqueName: \"kubernetes.io/projected/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-kube-api-access-ddb9x\") on node \"crc\" DevicePath \"\""
Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.272686 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.296588 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6755ca71-09d7-4ff9-b3dd-fef8a8f87738" (UID: "6755ca71-09d7-4ff9-b3dd-fef8a8f87738"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.373975 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6755ca71-09d7-4ff9-b3dd-fef8a8f87738-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.583577 5014 generic.go:334] "Generic (PLEG): container finished" podID="6755ca71-09d7-4ff9-b3dd-fef8a8f87738" containerID="3dd69711c71294e1e95f3b59e0bf6491b14f1f38cbac93b9c456bac218f10dbf" exitCode=0 Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.583668 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gnn25" event={"ID":"6755ca71-09d7-4ff9-b3dd-fef8a8f87738","Type":"ContainerDied","Data":"3dd69711c71294e1e95f3b59e0bf6491b14f1f38cbac93b9c456bac218f10dbf"} Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.583719 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gnn25" event={"ID":"6755ca71-09d7-4ff9-b3dd-fef8a8f87738","Type":"ContainerDied","Data":"c759fa6256002a5156d2f40c61079833e0bb648d843293cb619d093966cc107c"} Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.583748 5014 scope.go:117] "RemoveContainer" containerID="3dd69711c71294e1e95f3b59e0bf6491b14f1f38cbac93b9c456bac218f10dbf" Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.583782 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gnn25" Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.618221 5014 scope.go:117] "RemoveContainer" containerID="dc75585fac820a190c14bedc98a3db59cff4f22fc9ffda8712f193de4948ac3e" Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.642101 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gnn25"] Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.646980 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gnn25"] Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.657304 5014 scope.go:117] "RemoveContainer" containerID="a234da4874d82f5a9f5f25fcfad721373c69b694982509ad2633914d5ca1dbaa" Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.714891 5014 scope.go:117] "RemoveContainer" containerID="3dd69711c71294e1e95f3b59e0bf6491b14f1f38cbac93b9c456bac218f10dbf" Oct 06 22:20:58 crc kubenswrapper[5014]: E1006 22:20:58.715565 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3dd69711c71294e1e95f3b59e0bf6491b14f1f38cbac93b9c456bac218f10dbf\": container with ID starting with 3dd69711c71294e1e95f3b59e0bf6491b14f1f38cbac93b9c456bac218f10dbf not found: ID does not exist" containerID="3dd69711c71294e1e95f3b59e0bf6491b14f1f38cbac93b9c456bac218f10dbf" Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.715644 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dd69711c71294e1e95f3b59e0bf6491b14f1f38cbac93b9c456bac218f10dbf"} err="failed to get container status \"3dd69711c71294e1e95f3b59e0bf6491b14f1f38cbac93b9c456bac218f10dbf\": rpc error: code = NotFound desc = could not find container \"3dd69711c71294e1e95f3b59e0bf6491b14f1f38cbac93b9c456bac218f10dbf\": container with ID starting with 3dd69711c71294e1e95f3b59e0bf6491b14f1f38cbac93b9c456bac218f10dbf not found: ID does not exist" Oct 06 22:20:58 crc 
kubenswrapper[5014]: I1006 22:20:58.715680 5014 scope.go:117] "RemoveContainer" containerID="dc75585fac820a190c14bedc98a3db59cff4f22fc9ffda8712f193de4948ac3e" Oct 06 22:20:58 crc kubenswrapper[5014]: E1006 22:20:58.716088 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc75585fac820a190c14bedc98a3db59cff4f22fc9ffda8712f193de4948ac3e\": container with ID starting with dc75585fac820a190c14bedc98a3db59cff4f22fc9ffda8712f193de4948ac3e not found: ID does not exist" containerID="dc75585fac820a190c14bedc98a3db59cff4f22fc9ffda8712f193de4948ac3e" Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.716158 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc75585fac820a190c14bedc98a3db59cff4f22fc9ffda8712f193de4948ac3e"} err="failed to get container status \"dc75585fac820a190c14bedc98a3db59cff4f22fc9ffda8712f193de4948ac3e\": rpc error: code = NotFound desc = could not find container \"dc75585fac820a190c14bedc98a3db59cff4f22fc9ffda8712f193de4948ac3e\": container with ID starting with dc75585fac820a190c14bedc98a3db59cff4f22fc9ffda8712f193de4948ac3e not found: ID does not exist" Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.716205 5014 scope.go:117] "RemoveContainer" containerID="a234da4874d82f5a9f5f25fcfad721373c69b694982509ad2633914d5ca1dbaa" Oct 06 22:20:58 crc kubenswrapper[5014]: E1006 22:20:58.716763 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a234da4874d82f5a9f5f25fcfad721373c69b694982509ad2633914d5ca1dbaa\": container with ID starting with a234da4874d82f5a9f5f25fcfad721373c69b694982509ad2633914d5ca1dbaa not found: ID does not exist" containerID="a234da4874d82f5a9f5f25fcfad721373c69b694982509ad2633914d5ca1dbaa" Oct 06 22:20:58 crc kubenswrapper[5014]: I1006 22:20:58.716816 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a234da4874d82f5a9f5f25fcfad721373c69b694982509ad2633914d5ca1dbaa"} err="failed to get container status \"a234da4874d82f5a9f5f25fcfad721373c69b694982509ad2633914d5ca1dbaa\": rpc error: code = NotFound desc = could not find container \"a234da4874d82f5a9f5f25fcfad721373c69b694982509ad2633914d5ca1dbaa\": container with ID starting with a234da4874d82f5a9f5f25fcfad721373c69b694982509ad2633914d5ca1dbaa not found: ID does not exist" Oct 06 22:20:59 crc kubenswrapper[5014]: I1006 22:20:59.498009 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6755ca71-09d7-4ff9-b3dd-fef8a8f87738" path="/var/lib/kubelet/pods/6755ca71-09d7-4ff9-b3dd-fef8a8f87738/volumes" Oct 06 22:21:21 crc kubenswrapper[5014]: I1006 22:21:21.735039 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:21:21 crc kubenswrapper[5014]: I1006 22:21:21.735866 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:21:51 crc kubenswrapper[5014]: I1006 22:21:51.735114 5014 patch_prober.go:28] interesting 
Oct 06 22:21:51 crc kubenswrapper[5014]: I1006 22:21:51.735809 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 22:22:21 crc kubenswrapper[5014]: I1006 22:22:21.735039 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 22:22:21 crc kubenswrapper[5014]: I1006 22:22:21.735769 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 22:22:21 crc kubenswrapper[5014]: I1006 22:22:21.735839 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths"
Oct 06 22:22:21 crc kubenswrapper[5014]: I1006 22:22:21.736867 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 06 22:22:21 crc kubenswrapper[5014]: I1006 22:22:21.736953 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b" gracePeriod=600
Oct 06 22:22:21 crc kubenswrapper[5014]: E1006 22:22:21.864398 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:22:22 crc kubenswrapper[5014]: I1006 22:22:22.467824 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b" exitCode=0
Oct 06 22:22:22 crc kubenswrapper[5014]: I1006 22:22:22.467886 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"}
Oct 06 22:22:22 crc kubenswrapper[5014]: I1006 22:22:22.467936 5014 scope.go:117] "RemoveContainer" containerID="dcc0d26c271b15739956d985f7ad3120a8d4e8910b238b26753fb5936d6e11b2"
Oct 06 22:22:22 crc kubenswrapper[5014]: I1006 22:22:22.468705 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:22:22 crc kubenswrapper[5014]: E1006 22:22:22.469240 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:22:35 crc kubenswrapper[5014]: I1006 22:22:35.488572 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:22:35 crc kubenswrapper[5014]: E1006 22:22:35.489277 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:22:49 crc kubenswrapper[5014]: I1006 22:22:49.485733 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:22:49 crc kubenswrapper[5014]: E1006 22:22:49.486835 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:23:00 crc kubenswrapper[5014]: I1006 22:23:00.484577 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:23:00 crc kubenswrapper[5014]: E1006 22:23:00.485442 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:23:01 crc kubenswrapper[5014]: I1006 22:23:01.946239 5014 scope.go:117] "RemoveContainer" containerID="c109b3ee633f5b0e14cb7308dedc7b8fccf506f00599afbcd95c065b1e0b3213"
Oct 06 22:23:01 crc kubenswrapper[5014]: I1006 22:23:01.983053 5014 scope.go:117] "RemoveContainer" containerID="fdc34b94ae3a831e9f72da48e0adfe20e02dcb8da88e4bbbf4cd3023b23e4136"
Oct 06 22:23:02 crc kubenswrapper[5014]: I1006 22:23:02.011022 5014 scope.go:117] "RemoveContainer" containerID="86cafe713c11838a3365e13de270dfe7535fddaad6fe1cb52e0d6deae522a05c"
Oct 06 22:23:13 crc kubenswrapper[5014]: I1006 22:23:13.484810 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:23:13 crc kubenswrapper[5014]: E1006 22:23:13.486285 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:23:28 crc kubenswrapper[5014]: I1006 22:23:28.485032 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:23:28 crc kubenswrapper[5014]: E1006 22:23:28.486025 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:23:42 crc kubenswrapper[5014]: I1006 22:23:42.484715 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:23:42 crc kubenswrapper[5014]: E1006 22:23:42.485793 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:23:57 crc kubenswrapper[5014]: I1006 22:23:57.486121 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:23:57 crc kubenswrapper[5014]: E1006 22:23:57.487285 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:24:09 crc kubenswrapper[5014]: I1006 22:24:09.485310 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:24:09 crc kubenswrapper[5014]: E1006 22:24:09.486385 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:24:23 crc kubenswrapper[5014]: I1006 22:24:23.488572 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:24:23 crc kubenswrapper[5014]: E1006 22:24:23.491060 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:24:36 crc kubenswrapper[5014]: I1006 22:24:36.486065 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:24:36 crc kubenswrapper[5014]: E1006 22:24:36.487760 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:24:50 crc kubenswrapper[5014]: I1006 22:24:50.484614 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:24:50 crc kubenswrapper[5014]: E1006 22:24:50.485965 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:25:03 crc kubenswrapper[5014]: I1006 22:25:03.485270 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:25:03 crc kubenswrapper[5014]: E1006 22:25:03.486471 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:25:16 crc kubenswrapper[5014]: I1006 22:25:16.485256 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:25:16 crc kubenswrapper[5014]: E1006 22:25:16.486407 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:25:29 crc kubenswrapper[5014]: I1006 22:25:29.485063 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:25:29 crc kubenswrapper[5014]: E1006 22:25:29.486014 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:25:43 crc kubenswrapper[5014]: I1006 22:25:43.485232 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b" Oct 06 22:25:43 crc kubenswrapper[5014]: E1006 22:25:43.486315 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:25:56 crc kubenswrapper[5014]: I1006 22:25:56.485963 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b" Oct 06 22:25:56 crc kubenswrapper[5014]: E1006 22:25:56.487333 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:26:09 crc kubenswrapper[5014]: I1006 22:26:09.485928 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b" Oct 06 22:26:09 crc kubenswrapper[5014]: E1006 22:26:09.489241 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:26:24 crc kubenswrapper[5014]: I1006 22:26:24.485218 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b" Oct 06 22:26:24 crc kubenswrapper[5014]: E1006 22:26:24.486671 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.559947 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dctrr"] Oct 06 22:26:30 crc kubenswrapper[5014]: E1006 22:26:30.561272 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e454ee45-0feb-43ae-9c4e-7e93b8a3ef80" containerName="extract-utilities" Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.561306 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="e454ee45-0feb-43ae-9c4e-7e93b8a3ef80" containerName="extract-utilities" Oct 06 22:26:30 crc kubenswrapper[5014]: E1006 22:26:30.561363 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e454ee45-0feb-43ae-9c4e-7e93b8a3ef80" 
containerName="extract-content" Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.561377 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="e454ee45-0feb-43ae-9c4e-7e93b8a3ef80" containerName="extract-content" Oct 06 22:26:30 crc kubenswrapper[5014]: E1006 22:26:30.561400 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6755ca71-09d7-4ff9-b3dd-fef8a8f87738" containerName="extract-content" Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.561450 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="6755ca71-09d7-4ff9-b3dd-fef8a8f87738" containerName="extract-content" Oct 06 22:26:30 crc kubenswrapper[5014]: E1006 22:26:30.561470 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6755ca71-09d7-4ff9-b3dd-fef8a8f87738" containerName="extract-utilities" Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.561482 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="6755ca71-09d7-4ff9-b3dd-fef8a8f87738" containerName="extract-utilities" Oct 06 22:26:30 crc kubenswrapper[5014]: E1006 22:26:30.561543 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6755ca71-09d7-4ff9-b3dd-fef8a8f87738" containerName="registry-server" Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.561557 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="6755ca71-09d7-4ff9-b3dd-fef8a8f87738" containerName="registry-server" Oct 06 22:26:30 crc kubenswrapper[5014]: E1006 22:26:30.561606 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e454ee45-0feb-43ae-9c4e-7e93b8a3ef80" containerName="registry-server" Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.561646 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="e454ee45-0feb-43ae-9c4e-7e93b8a3ef80" containerName="registry-server" Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.561910 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="e454ee45-0feb-43ae-9c4e-7e93b8a3ef80" containerName="registry-server" Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.561941 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="6755ca71-09d7-4ff9-b3dd-fef8a8f87738" containerName="registry-server" Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.564195 5014 util.go:30] "No sandbox for pod can be found. 
Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.576321 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dctrr"]
Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.683976 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36536274-1321-45be-bb66-eed1b007374f-catalog-content\") pod \"certified-operators-dctrr\" (UID: \"36536274-1321-45be-bb66-eed1b007374f\") " pod="openshift-marketplace/certified-operators-dctrr"
Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.684061 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36536274-1321-45be-bb66-eed1b007374f-utilities\") pod \"certified-operators-dctrr\" (UID: \"36536274-1321-45be-bb66-eed1b007374f\") " pod="openshift-marketplace/certified-operators-dctrr"
Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.684197 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42rjx\" (UniqueName: \"kubernetes.io/projected/36536274-1321-45be-bb66-eed1b007374f-kube-api-access-42rjx\") pod \"certified-operators-dctrr\" (UID: \"36536274-1321-45be-bb66-eed1b007374f\") " pod="openshift-marketplace/certified-operators-dctrr"
Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.785454 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36536274-1321-45be-bb66-eed1b007374f-catalog-content\") pod \"certified-operators-dctrr\" (UID: \"36536274-1321-45be-bb66-eed1b007374f\") " pod="openshift-marketplace/certified-operators-dctrr"
Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.785499 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36536274-1321-45be-bb66-eed1b007374f-utilities\") pod \"certified-operators-dctrr\" (UID: \"36536274-1321-45be-bb66-eed1b007374f\") " pod="openshift-marketplace/certified-operators-dctrr"
Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.785563 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42rjx\" (UniqueName: \"kubernetes.io/projected/36536274-1321-45be-bb66-eed1b007374f-kube-api-access-42rjx\") pod \"certified-operators-dctrr\" (UID: \"36536274-1321-45be-bb66-eed1b007374f\") " pod="openshift-marketplace/certified-operators-dctrr"
Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.786399 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36536274-1321-45be-bb66-eed1b007374f-utilities\") pod \"certified-operators-dctrr\" (UID: \"36536274-1321-45be-bb66-eed1b007374f\") " pod="openshift-marketplace/certified-operators-dctrr"
Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.786717 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36536274-1321-45be-bb66-eed1b007374f-catalog-content\") pod \"certified-operators-dctrr\" (UID: \"36536274-1321-45be-bb66-eed1b007374f\") " pod="openshift-marketplace/certified-operators-dctrr"
Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.810563 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42rjx\" (UniqueName: \"kubernetes.io/projected/36536274-1321-45be-bb66-eed1b007374f-kube-api-access-42rjx\") pod \"certified-operators-dctrr\" (UID: \"36536274-1321-45be-bb66-eed1b007374f\") " pod="openshift-marketplace/certified-operators-dctrr"
Oct 06 22:26:30 crc kubenswrapper[5014]: I1006 22:26:30.889896 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dctrr"
Oct 06 22:26:31 crc kubenswrapper[5014]: I1006 22:26:31.160270 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dctrr"]
Oct 06 22:26:31 crc kubenswrapper[5014]: I1006 22:26:31.835697 5014 generic.go:334] "Generic (PLEG): container finished" podID="36536274-1321-45be-bb66-eed1b007374f" containerID="487f8ba2ac0846b8eb9e5b0d35cc20e059093a1ee03bdee1e67bbcaa9866adcb" exitCode=0
Oct 06 22:26:31 crc kubenswrapper[5014]: I1006 22:26:31.835774 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dctrr" event={"ID":"36536274-1321-45be-bb66-eed1b007374f","Type":"ContainerDied","Data":"487f8ba2ac0846b8eb9e5b0d35cc20e059093a1ee03bdee1e67bbcaa9866adcb"}
Oct 06 22:26:31 crc kubenswrapper[5014]: I1006 22:26:31.837713 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dctrr" event={"ID":"36536274-1321-45be-bb66-eed1b007374f","Type":"ContainerStarted","Data":"d500f9beec5b80acdf7c2b2b1a35b7e6d9cc77ce25c8b108f86a7fb8a47af777"}
Oct 06 22:26:31 crc kubenswrapper[5014]: I1006 22:26:31.838077 5014 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 06 22:26:32 crc kubenswrapper[5014]: I1006 22:26:32.849567 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dctrr" event={"ID":"36536274-1321-45be-bb66-eed1b007374f","Type":"ContainerStarted","Data":"6af5a4dc7f1e7fa648a8a23d676c0674616c18bf7e765a5b3200ce4457054376"}
Oct 06 22:26:33 crc kubenswrapper[5014]: I1006 22:26:33.862660 5014 generic.go:334] "Generic (PLEG): container finished" podID="36536274-1321-45be-bb66-eed1b007374f" containerID="6af5a4dc7f1e7fa648a8a23d676c0674616c18bf7e765a5b3200ce4457054376" exitCode=0
Oct 06 22:26:33 crc kubenswrapper[5014]: I1006 22:26:33.862728 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dctrr" event={"ID":"36536274-1321-45be-bb66-eed1b007374f","Type":"ContainerDied","Data":"6af5a4dc7f1e7fa648a8a23d676c0674616c18bf7e765a5b3200ce4457054376"}
Oct 06 22:26:34 crc kubenswrapper[5014]: I1006 22:26:34.873606 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dctrr" event={"ID":"36536274-1321-45be-bb66-eed1b007374f","Type":"ContainerStarted","Data":"d768661c80bc0f32be13fa38a448131b2d93250f87b45a81a33d1bc2850e7358"}
Oct 06 22:26:34 crc kubenswrapper[5014]: I1006 22:26:34.899860 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dctrr" podStartSLOduration=2.380164708 podStartE2EDuration="4.899839317s" podCreationTimestamp="2025-10-06 22:26:30 +0000 UTC" firstStartedPulling="2025-10-06 22:26:31.837641669 +0000 UTC m=+3337.130678443" lastFinishedPulling="2025-10-06 22:26:34.357316278 +0000 UTC m=+3339.650353052" observedRunningTime="2025-10-06 22:26:34.895312735 +0000 UTC m=+3340.188349509" watchObservedRunningTime="2025-10-06 22:26:34.899839317 +0000 UTC m=+3340.192876081"
Oct 06 22:26:35 crc kubenswrapper[5014]: I1006 22:26:35.514579 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b"
Oct 06 22:26:35 crc kubenswrapper[5014]: E1006 22:26:35.515227 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:26:40 crc kubenswrapper[5014]: I1006 22:26:40.890866 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dctrr"
Oct 06 22:26:40 crc kubenswrapper[5014]: I1006 22:26:40.891241 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dctrr"
Oct 06 22:26:40 crc kubenswrapper[5014]: I1006 22:26:40.972164 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dctrr"
Oct 06 22:26:41 crc kubenswrapper[5014]: I1006 22:26:41.021543 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dctrr"
Oct 06 22:26:41 crc kubenswrapper[5014]: I1006 22:26:41.219762 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dctrr"]
Oct 06 22:26:42 crc kubenswrapper[5014]: I1006 22:26:42.944078 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dctrr" podUID="36536274-1321-45be-bb66-eed1b007374f" containerName="registry-server" containerID="cri-o://d768661c80bc0f32be13fa38a448131b2d93250f87b45a81a33d1bc2850e7358" gracePeriod=2
Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.454545 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dctrr"
Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.593361 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36536274-1321-45be-bb66-eed1b007374f-catalog-content\") pod \"36536274-1321-45be-bb66-eed1b007374f\" (UID: \"36536274-1321-45be-bb66-eed1b007374f\") "
Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.593502 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-42rjx\" (UniqueName: \"kubernetes.io/projected/36536274-1321-45be-bb66-eed1b007374f-kube-api-access-42rjx\") pod \"36536274-1321-45be-bb66-eed1b007374f\" (UID: \"36536274-1321-45be-bb66-eed1b007374f\") "
Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.593554 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36536274-1321-45be-bb66-eed1b007374f-utilities\") pod \"36536274-1321-45be-bb66-eed1b007374f\" (UID: \"36536274-1321-45be-bb66-eed1b007374f\") "
Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.594772 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36536274-1321-45be-bb66-eed1b007374f-utilities" (OuterVolumeSpecName: "utilities") pod "36536274-1321-45be-bb66-eed1b007374f" (UID: "36536274-1321-45be-bb66-eed1b007374f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.602776 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36536274-1321-45be-bb66-eed1b007374f-kube-api-access-42rjx" (OuterVolumeSpecName: "kube-api-access-42rjx") pod "36536274-1321-45be-bb66-eed1b007374f" (UID: "36536274-1321-45be-bb66-eed1b007374f"). InnerVolumeSpecName "kube-api-access-42rjx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.696095 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-42rjx\" (UniqueName: \"kubernetes.io/projected/36536274-1321-45be-bb66-eed1b007374f-kube-api-access-42rjx\") on node \"crc\" DevicePath \"\""
Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.696143 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36536274-1321-45be-bb66-eed1b007374f-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.700434 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36536274-1321-45be-bb66-eed1b007374f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "36536274-1321-45be-bb66-eed1b007374f" (UID: "36536274-1321-45be-bb66-eed1b007374f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.798219 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36536274-1321-45be-bb66-eed1b007374f-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.959918 5014 generic.go:334] "Generic (PLEG): container finished" podID="36536274-1321-45be-bb66-eed1b007374f" containerID="d768661c80bc0f32be13fa38a448131b2d93250f87b45a81a33d1bc2850e7358" exitCode=0 Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.960032 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dctrr" Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.960856 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dctrr" event={"ID":"36536274-1321-45be-bb66-eed1b007374f","Type":"ContainerDied","Data":"d768661c80bc0f32be13fa38a448131b2d93250f87b45a81a33d1bc2850e7358"} Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.961106 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dctrr" event={"ID":"36536274-1321-45be-bb66-eed1b007374f","Type":"ContainerDied","Data":"d500f9beec5b80acdf7c2b2b1a35b7e6d9cc77ce25c8b108f86a7fb8a47af777"} Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.961220 5014 scope.go:117] "RemoveContainer" containerID="d768661c80bc0f32be13fa38a448131b2d93250f87b45a81a33d1bc2850e7358" Oct 06 22:26:43 crc kubenswrapper[5014]: I1006 22:26:43.993351 5014 scope.go:117] "RemoveContainer" containerID="6af5a4dc7f1e7fa648a8a23d676c0674616c18bf7e765a5b3200ce4457054376" Oct 06 22:26:44 crc kubenswrapper[5014]: I1006 22:26:44.025777 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dctrr"] Oct 06 22:26:44 crc kubenswrapper[5014]: I1006 22:26:44.034608 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dctrr"] Oct 06 22:26:44 crc kubenswrapper[5014]: I1006 22:26:44.056825 5014 scope.go:117] "RemoveContainer" containerID="487f8ba2ac0846b8eb9e5b0d35cc20e059093a1ee03bdee1e67bbcaa9866adcb" Oct 06 22:26:44 crc kubenswrapper[5014]: I1006 22:26:44.080638 5014 scope.go:117] "RemoveContainer" containerID="d768661c80bc0f32be13fa38a448131b2d93250f87b45a81a33d1bc2850e7358" Oct 06 22:26:44 crc kubenswrapper[5014]: E1006 22:26:44.081080 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d768661c80bc0f32be13fa38a448131b2d93250f87b45a81a33d1bc2850e7358\": container with ID starting with d768661c80bc0f32be13fa38a448131b2d93250f87b45a81a33d1bc2850e7358 not found: ID does not exist" containerID="d768661c80bc0f32be13fa38a448131b2d93250f87b45a81a33d1bc2850e7358" Oct 06 22:26:44 crc kubenswrapper[5014]: I1006 22:26:44.081270 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d768661c80bc0f32be13fa38a448131b2d93250f87b45a81a33d1bc2850e7358"} err="failed to get container status \"d768661c80bc0f32be13fa38a448131b2d93250f87b45a81a33d1bc2850e7358\": rpc error: code = NotFound desc = could not find container \"d768661c80bc0f32be13fa38a448131b2d93250f87b45a81a33d1bc2850e7358\": container with ID starting with d768661c80bc0f32be13fa38a448131b2d93250f87b45a81a33d1bc2850e7358 not found: ID does not exist" Oct 06 
22:26:44 crc kubenswrapper[5014]: I1006 22:26:44.081442 5014 scope.go:117] "RemoveContainer" containerID="6af5a4dc7f1e7fa648a8a23d676c0674616c18bf7e765a5b3200ce4457054376" Oct 06 22:26:44 crc kubenswrapper[5014]: E1006 22:26:44.082164 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6af5a4dc7f1e7fa648a8a23d676c0674616c18bf7e765a5b3200ce4457054376\": container with ID starting with 6af5a4dc7f1e7fa648a8a23d676c0674616c18bf7e765a5b3200ce4457054376 not found: ID does not exist" containerID="6af5a4dc7f1e7fa648a8a23d676c0674616c18bf7e765a5b3200ce4457054376" Oct 06 22:26:44 crc kubenswrapper[5014]: I1006 22:26:44.082222 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6af5a4dc7f1e7fa648a8a23d676c0674616c18bf7e765a5b3200ce4457054376"} err="failed to get container status \"6af5a4dc7f1e7fa648a8a23d676c0674616c18bf7e765a5b3200ce4457054376\": rpc error: code = NotFound desc = could not find container \"6af5a4dc7f1e7fa648a8a23d676c0674616c18bf7e765a5b3200ce4457054376\": container with ID starting with 6af5a4dc7f1e7fa648a8a23d676c0674616c18bf7e765a5b3200ce4457054376 not found: ID does not exist" Oct 06 22:26:44 crc kubenswrapper[5014]: I1006 22:26:44.082255 5014 scope.go:117] "RemoveContainer" containerID="487f8ba2ac0846b8eb9e5b0d35cc20e059093a1ee03bdee1e67bbcaa9866adcb" Oct 06 22:26:44 crc kubenswrapper[5014]: E1006 22:26:44.082725 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"487f8ba2ac0846b8eb9e5b0d35cc20e059093a1ee03bdee1e67bbcaa9866adcb\": container with ID starting with 487f8ba2ac0846b8eb9e5b0d35cc20e059093a1ee03bdee1e67bbcaa9866adcb not found: ID does not exist" containerID="487f8ba2ac0846b8eb9e5b0d35cc20e059093a1ee03bdee1e67bbcaa9866adcb" Oct 06 22:26:44 crc kubenswrapper[5014]: I1006 22:26:44.082899 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"487f8ba2ac0846b8eb9e5b0d35cc20e059093a1ee03bdee1e67bbcaa9866adcb"} err="failed to get container status \"487f8ba2ac0846b8eb9e5b0d35cc20e059093a1ee03bdee1e67bbcaa9866adcb\": rpc error: code = NotFound desc = could not find container \"487f8ba2ac0846b8eb9e5b0d35cc20e059093a1ee03bdee1e67bbcaa9866adcb\": container with ID starting with 487f8ba2ac0846b8eb9e5b0d35cc20e059093a1ee03bdee1e67bbcaa9866adcb not found: ID does not exist" Oct 06 22:26:45 crc kubenswrapper[5014]: I1006 22:26:45.499559 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36536274-1321-45be-bb66-eed1b007374f" path="/var/lib/kubelet/pods/36536274-1321-45be-bb66-eed1b007374f/volumes" Oct 06 22:26:46 crc kubenswrapper[5014]: I1006 22:26:46.484486 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b" Oct 06 22:26:46 crc kubenswrapper[5014]: E1006 22:26:46.484970 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:26:57 crc kubenswrapper[5014]: I1006 22:26:57.485001 5014 scope.go:117] "RemoveContainer" 
containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b" Oct 06 22:26:57 crc kubenswrapper[5014]: E1006 22:26:57.486164 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:27:08 crc kubenswrapper[5014]: I1006 22:27:08.485703 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b" Oct 06 22:27:08 crc kubenswrapper[5014]: E1006 22:27:08.487136 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:27:23 crc kubenswrapper[5014]: I1006 22:27:23.484372 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b" Oct 06 22:27:24 crc kubenswrapper[5014]: I1006 22:27:24.356044 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"798de3444f7c40078c43c5e21f686ecd23ef225d8de60b2170e70ab24cc2022c"} Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.597264 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nx97p"] Oct 06 22:29:40 crc kubenswrapper[5014]: E1006 22:29:40.598379 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36536274-1321-45be-bb66-eed1b007374f" containerName="extract-content" Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.598400 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="36536274-1321-45be-bb66-eed1b007374f" containerName="extract-content" Oct 06 22:29:40 crc kubenswrapper[5014]: E1006 22:29:40.598420 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36536274-1321-45be-bb66-eed1b007374f" containerName="registry-server" Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.598432 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="36536274-1321-45be-bb66-eed1b007374f" containerName="registry-server" Oct 06 22:29:40 crc kubenswrapper[5014]: E1006 22:29:40.598452 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36536274-1321-45be-bb66-eed1b007374f" containerName="extract-utilities" Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.598467 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="36536274-1321-45be-bb66-eed1b007374f" containerName="extract-utilities" Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.598758 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="36536274-1321-45be-bb66-eed1b007374f" containerName="registry-server" Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.602884 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.623347 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nx97p"] Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.730922 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd103acc-8138-4b1b-a879-1ba8b5a64796-catalog-content\") pod \"redhat-marketplace-nx97p\" (UID: \"fd103acc-8138-4b1b-a879-1ba8b5a64796\") " pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.731068 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd103acc-8138-4b1b-a879-1ba8b5a64796-utilities\") pod \"redhat-marketplace-nx97p\" (UID: \"fd103acc-8138-4b1b-a879-1ba8b5a64796\") " pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.731146 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxxr8\" (UniqueName: \"kubernetes.io/projected/fd103acc-8138-4b1b-a879-1ba8b5a64796-kube-api-access-bxxr8\") pod \"redhat-marketplace-nx97p\" (UID: \"fd103acc-8138-4b1b-a879-1ba8b5a64796\") " pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.832784 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd103acc-8138-4b1b-a879-1ba8b5a64796-catalog-content\") pod \"redhat-marketplace-nx97p\" (UID: \"fd103acc-8138-4b1b-a879-1ba8b5a64796\") " pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.832896 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd103acc-8138-4b1b-a879-1ba8b5a64796-utilities\") pod \"redhat-marketplace-nx97p\" (UID: \"fd103acc-8138-4b1b-a879-1ba8b5a64796\") " pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.832955 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxxr8\" (UniqueName: \"kubernetes.io/projected/fd103acc-8138-4b1b-a879-1ba8b5a64796-kube-api-access-bxxr8\") pod \"redhat-marketplace-nx97p\" (UID: \"fd103acc-8138-4b1b-a879-1ba8b5a64796\") " pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.833389 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd103acc-8138-4b1b-a879-1ba8b5a64796-catalog-content\") pod \"redhat-marketplace-nx97p\" (UID: \"fd103acc-8138-4b1b-a879-1ba8b5a64796\") " pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.833726 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd103acc-8138-4b1b-a879-1ba8b5a64796-utilities\") pod \"redhat-marketplace-nx97p\" (UID: \"fd103acc-8138-4b1b-a879-1ba8b5a64796\") " pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.861176 5014 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-bxxr8\" (UniqueName: \"kubernetes.io/projected/fd103acc-8138-4b1b-a879-1ba8b5a64796-kube-api-access-bxxr8\") pod \"redhat-marketplace-nx97p\" (UID: \"fd103acc-8138-4b1b-a879-1ba8b5a64796\") " pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:40 crc kubenswrapper[5014]: I1006 22:29:40.950321 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:41 crc kubenswrapper[5014]: I1006 22:29:41.398330 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nx97p"] Oct 06 22:29:41 crc kubenswrapper[5014]: I1006 22:29:41.607801 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx97p" event={"ID":"fd103acc-8138-4b1b-a879-1ba8b5a64796","Type":"ContainerStarted","Data":"1a8c77b6023e9fdb6cc980751653fdf4a96429f0a79a270153cc010987fabc24"} Oct 06 22:29:42 crc kubenswrapper[5014]: I1006 22:29:42.621615 5014 generic.go:334] "Generic (PLEG): container finished" podID="fd103acc-8138-4b1b-a879-1ba8b5a64796" containerID="bc130a995a3a931c6935dabd1ec196ed154e2477af4ad3bc3e94ccfa6653e585" exitCode=0 Oct 06 22:29:42 crc kubenswrapper[5014]: I1006 22:29:42.621743 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx97p" event={"ID":"fd103acc-8138-4b1b-a879-1ba8b5a64796","Type":"ContainerDied","Data":"bc130a995a3a931c6935dabd1ec196ed154e2477af4ad3bc3e94ccfa6653e585"} Oct 06 22:29:43 crc kubenswrapper[5014]: I1006 22:29:43.635845 5014 generic.go:334] "Generic (PLEG): container finished" podID="fd103acc-8138-4b1b-a879-1ba8b5a64796" containerID="4f3c34008b5e21393c86698d7952998ee3a57456ae826c56a02eb0a1c2f5cdb5" exitCode=0 Oct 06 22:29:43 crc kubenswrapper[5014]: I1006 22:29:43.635900 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx97p" event={"ID":"fd103acc-8138-4b1b-a879-1ba8b5a64796","Type":"ContainerDied","Data":"4f3c34008b5e21393c86698d7952998ee3a57456ae826c56a02eb0a1c2f5cdb5"} Oct 06 22:29:44 crc kubenswrapper[5014]: I1006 22:29:44.645550 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx97p" event={"ID":"fd103acc-8138-4b1b-a879-1ba8b5a64796","Type":"ContainerStarted","Data":"f223952eccb749fd5fd0abd517282d709cc22b5ae69a96aa00c6c61b31a5030b"} Oct 06 22:29:44 crc kubenswrapper[5014]: I1006 22:29:44.668426 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nx97p" podStartSLOduration=3.252715934 podStartE2EDuration="4.668408799s" podCreationTimestamp="2025-10-06 22:29:40 +0000 UTC" firstStartedPulling="2025-10-06 22:29:42.624271783 +0000 UTC m=+3527.917308557" lastFinishedPulling="2025-10-06 22:29:44.039964658 +0000 UTC m=+3529.333001422" observedRunningTime="2025-10-06 22:29:44.664104684 +0000 UTC m=+3529.957141418" watchObservedRunningTime="2025-10-06 22:29:44.668408799 +0000 UTC m=+3529.961445533" Oct 06 22:29:50 crc kubenswrapper[5014]: I1006 22:29:50.950997 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:50 crc kubenswrapper[5014]: I1006 22:29:50.951613 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:51 crc kubenswrapper[5014]: I1006 22:29:51.017546 5014 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:51 crc kubenswrapper[5014]: I1006 22:29:51.735214 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:29:51 crc kubenswrapper[5014]: I1006 22:29:51.735671 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:29:51 crc kubenswrapper[5014]: I1006 22:29:51.789974 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:51 crc kubenswrapper[5014]: I1006 22:29:51.847608 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nx97p"] Oct 06 22:29:53 crc kubenswrapper[5014]: I1006 22:29:53.730074 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nx97p" podUID="fd103acc-8138-4b1b-a879-1ba8b5a64796" containerName="registry-server" containerID="cri-o://f223952eccb749fd5fd0abd517282d709cc22b5ae69a96aa00c6c61b31a5030b" gracePeriod=2 Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.186884 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.253335 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd103acc-8138-4b1b-a879-1ba8b5a64796-utilities\") pod \"fd103acc-8138-4b1b-a879-1ba8b5a64796\" (UID: \"fd103acc-8138-4b1b-a879-1ba8b5a64796\") " Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.253437 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxxr8\" (UniqueName: \"kubernetes.io/projected/fd103acc-8138-4b1b-a879-1ba8b5a64796-kube-api-access-bxxr8\") pod \"fd103acc-8138-4b1b-a879-1ba8b5a64796\" (UID: \"fd103acc-8138-4b1b-a879-1ba8b5a64796\") " Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.253532 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd103acc-8138-4b1b-a879-1ba8b5a64796-catalog-content\") pod \"fd103acc-8138-4b1b-a879-1ba8b5a64796\" (UID: \"fd103acc-8138-4b1b-a879-1ba8b5a64796\") " Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.254278 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd103acc-8138-4b1b-a879-1ba8b5a64796-utilities" (OuterVolumeSpecName: "utilities") pod "fd103acc-8138-4b1b-a879-1ba8b5a64796" (UID: "fd103acc-8138-4b1b-a879-1ba8b5a64796"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.259561 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd103acc-8138-4b1b-a879-1ba8b5a64796-kube-api-access-bxxr8" (OuterVolumeSpecName: "kube-api-access-bxxr8") pod "fd103acc-8138-4b1b-a879-1ba8b5a64796" (UID: "fd103acc-8138-4b1b-a879-1ba8b5a64796"). InnerVolumeSpecName "kube-api-access-bxxr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.278814 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd103acc-8138-4b1b-a879-1ba8b5a64796-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fd103acc-8138-4b1b-a879-1ba8b5a64796" (UID: "fd103acc-8138-4b1b-a879-1ba8b5a64796"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.354328 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd103acc-8138-4b1b-a879-1ba8b5a64796-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.354356 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd103acc-8138-4b1b-a879-1ba8b5a64796-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.354370 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxxr8\" (UniqueName: \"kubernetes.io/projected/fd103acc-8138-4b1b-a879-1ba8b5a64796-kube-api-access-bxxr8\") on node \"crc\" DevicePath \"\"" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.744490 5014 generic.go:334] "Generic (PLEG): container finished" podID="fd103acc-8138-4b1b-a879-1ba8b5a64796" containerID="f223952eccb749fd5fd0abd517282d709cc22b5ae69a96aa00c6c61b31a5030b" exitCode=0 Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.744569 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx97p" event={"ID":"fd103acc-8138-4b1b-a879-1ba8b5a64796","Type":"ContainerDied","Data":"f223952eccb749fd5fd0abd517282d709cc22b5ae69a96aa00c6c61b31a5030b"} Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.744665 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx97p" event={"ID":"fd103acc-8138-4b1b-a879-1ba8b5a64796","Type":"ContainerDied","Data":"1a8c77b6023e9fdb6cc980751653fdf4a96429f0a79a270153cc010987fabc24"} Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.744662 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nx97p" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.744796 5014 scope.go:117] "RemoveContainer" containerID="f223952eccb749fd5fd0abd517282d709cc22b5ae69a96aa00c6c61b31a5030b" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.776798 5014 scope.go:117] "RemoveContainer" containerID="4f3c34008b5e21393c86698d7952998ee3a57456ae826c56a02eb0a1c2f5cdb5" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.802575 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nx97p"] Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.811553 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nx97p"] Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.819907 5014 scope.go:117] "RemoveContainer" containerID="bc130a995a3a931c6935dabd1ec196ed154e2477af4ad3bc3e94ccfa6653e585" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.859248 5014 scope.go:117] "RemoveContainer" containerID="f223952eccb749fd5fd0abd517282d709cc22b5ae69a96aa00c6c61b31a5030b" Oct 06 22:29:54 crc kubenswrapper[5014]: E1006 22:29:54.860010 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f223952eccb749fd5fd0abd517282d709cc22b5ae69a96aa00c6c61b31a5030b\": container with ID starting with f223952eccb749fd5fd0abd517282d709cc22b5ae69a96aa00c6c61b31a5030b not found: ID does not exist" containerID="f223952eccb749fd5fd0abd517282d709cc22b5ae69a96aa00c6c61b31a5030b" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.860079 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f223952eccb749fd5fd0abd517282d709cc22b5ae69a96aa00c6c61b31a5030b"} err="failed to get container status \"f223952eccb749fd5fd0abd517282d709cc22b5ae69a96aa00c6c61b31a5030b\": rpc error: code = NotFound desc = could not find container \"f223952eccb749fd5fd0abd517282d709cc22b5ae69a96aa00c6c61b31a5030b\": container with ID starting with f223952eccb749fd5fd0abd517282d709cc22b5ae69a96aa00c6c61b31a5030b not found: ID does not exist" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.860123 5014 scope.go:117] "RemoveContainer" containerID="4f3c34008b5e21393c86698d7952998ee3a57456ae826c56a02eb0a1c2f5cdb5" Oct 06 22:29:54 crc kubenswrapper[5014]: E1006 22:29:54.860476 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f3c34008b5e21393c86698d7952998ee3a57456ae826c56a02eb0a1c2f5cdb5\": container with ID starting with 4f3c34008b5e21393c86698d7952998ee3a57456ae826c56a02eb0a1c2f5cdb5 not found: ID does not exist" containerID="4f3c34008b5e21393c86698d7952998ee3a57456ae826c56a02eb0a1c2f5cdb5" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.860591 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f3c34008b5e21393c86698d7952998ee3a57456ae826c56a02eb0a1c2f5cdb5"} err="failed to get container status \"4f3c34008b5e21393c86698d7952998ee3a57456ae826c56a02eb0a1c2f5cdb5\": rpc error: code = NotFound desc = could not find container \"4f3c34008b5e21393c86698d7952998ee3a57456ae826c56a02eb0a1c2f5cdb5\": container with ID starting with 4f3c34008b5e21393c86698d7952998ee3a57456ae826c56a02eb0a1c2f5cdb5 not found: ID does not exist" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.860696 5014 scope.go:117] "RemoveContainer" 
containerID="bc130a995a3a931c6935dabd1ec196ed154e2477af4ad3bc3e94ccfa6653e585" Oct 06 22:29:54 crc kubenswrapper[5014]: E1006 22:29:54.861000 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc130a995a3a931c6935dabd1ec196ed154e2477af4ad3bc3e94ccfa6653e585\": container with ID starting with bc130a995a3a931c6935dabd1ec196ed154e2477af4ad3bc3e94ccfa6653e585 not found: ID does not exist" containerID="bc130a995a3a931c6935dabd1ec196ed154e2477af4ad3bc3e94ccfa6653e585" Oct 06 22:29:54 crc kubenswrapper[5014]: I1006 22:29:54.861043 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc130a995a3a931c6935dabd1ec196ed154e2477af4ad3bc3e94ccfa6653e585"} err="failed to get container status \"bc130a995a3a931c6935dabd1ec196ed154e2477af4ad3bc3e94ccfa6653e585\": rpc error: code = NotFound desc = could not find container \"bc130a995a3a931c6935dabd1ec196ed154e2477af4ad3bc3e94ccfa6653e585\": container with ID starting with bc130a995a3a931c6935dabd1ec196ed154e2477af4ad3bc3e94ccfa6653e585 not found: ID does not exist" Oct 06 22:29:55 crc kubenswrapper[5014]: I1006 22:29:55.503781 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd103acc-8138-4b1b-a879-1ba8b5a64796" path="/var/lib/kubelet/pods/fd103acc-8138-4b1b-a879-1ba8b5a64796/volumes" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.182887 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b"] Oct 06 22:30:00 crc kubenswrapper[5014]: E1006 22:30:00.183686 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd103acc-8138-4b1b-a879-1ba8b5a64796" containerName="extract-utilities" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.183726 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd103acc-8138-4b1b-a879-1ba8b5a64796" containerName="extract-utilities" Oct 06 22:30:00 crc kubenswrapper[5014]: E1006 22:30:00.183767 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd103acc-8138-4b1b-a879-1ba8b5a64796" containerName="extract-content" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.183779 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd103acc-8138-4b1b-a879-1ba8b5a64796" containerName="extract-content" Oct 06 22:30:00 crc kubenswrapper[5014]: E1006 22:30:00.183802 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd103acc-8138-4b1b-a879-1ba8b5a64796" containerName="registry-server" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.183814 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd103acc-8138-4b1b-a879-1ba8b5a64796" containerName="registry-server" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.184037 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd103acc-8138-4b1b-a879-1ba8b5a64796" containerName="registry-server" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.184993 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.187751 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.188013 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.201095 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b"] Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.253771 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-config-volume\") pod \"collect-profiles-29329830-cws5b\" (UID: \"9c202b99-d2c5-4819-9da6-a4bc2f99e98d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.253875 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vh44z\" (UniqueName: \"kubernetes.io/projected/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-kube-api-access-vh44z\") pod \"collect-profiles-29329830-cws5b\" (UID: \"9c202b99-d2c5-4819-9da6-a4bc2f99e98d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.253917 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-secret-volume\") pod \"collect-profiles-29329830-cws5b\" (UID: \"9c202b99-d2c5-4819-9da6-a4bc2f99e98d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.355227 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vh44z\" (UniqueName: \"kubernetes.io/projected/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-kube-api-access-vh44z\") pod \"collect-profiles-29329830-cws5b\" (UID: \"9c202b99-d2c5-4819-9da6-a4bc2f99e98d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.355331 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-secret-volume\") pod \"collect-profiles-29329830-cws5b\" (UID: \"9c202b99-d2c5-4819-9da6-a4bc2f99e98d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.355487 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-config-volume\") pod \"collect-profiles-29329830-cws5b\" (UID: \"9c202b99-d2c5-4819-9da6-a4bc2f99e98d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.357319 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-config-volume\") pod 
\"collect-profiles-29329830-cws5b\" (UID: \"9c202b99-d2c5-4819-9da6-a4bc2f99e98d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.365866 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-secret-volume\") pod \"collect-profiles-29329830-cws5b\" (UID: \"9c202b99-d2c5-4819-9da6-a4bc2f99e98d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.377273 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vh44z\" (UniqueName: \"kubernetes.io/projected/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-kube-api-access-vh44z\") pod \"collect-profiles-29329830-cws5b\" (UID: \"9c202b99-d2c5-4819-9da6-a4bc2f99e98d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" Oct 06 22:30:00 crc kubenswrapper[5014]: I1006 22:30:00.523668 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" Oct 06 22:30:01 crc kubenswrapper[5014]: I1006 22:30:01.031259 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b"] Oct 06 22:30:01 crc kubenswrapper[5014]: I1006 22:30:01.810012 5014 generic.go:334] "Generic (PLEG): container finished" podID="9c202b99-d2c5-4819-9da6-a4bc2f99e98d" containerID="224362c0f15dddef4cc99f86fe86b003cf3adbe9a47384c5b92c7e5b69965908" exitCode=0 Oct 06 22:30:01 crc kubenswrapper[5014]: I1006 22:30:01.810072 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" event={"ID":"9c202b99-d2c5-4819-9da6-a4bc2f99e98d","Type":"ContainerDied","Data":"224362c0f15dddef4cc99f86fe86b003cf3adbe9a47384c5b92c7e5b69965908"} Oct 06 22:30:01 crc kubenswrapper[5014]: I1006 22:30:01.810110 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" event={"ID":"9c202b99-d2c5-4819-9da6-a4bc2f99e98d","Type":"ContainerStarted","Data":"40ea1d1d0d9fdab227ceff434a193cde3dfbba7036410d39cd296e3734ce5c44"} Oct 06 22:30:03 crc kubenswrapper[5014]: I1006 22:30:03.159967 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" Oct 06 22:30:03 crc kubenswrapper[5014]: I1006 22:30:03.193814 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vh44z\" (UniqueName: \"kubernetes.io/projected/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-kube-api-access-vh44z\") pod \"9c202b99-d2c5-4819-9da6-a4bc2f99e98d\" (UID: \"9c202b99-d2c5-4819-9da6-a4bc2f99e98d\") " Oct 06 22:30:03 crc kubenswrapper[5014]: I1006 22:30:03.193897 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-secret-volume\") pod \"9c202b99-d2c5-4819-9da6-a4bc2f99e98d\" (UID: \"9c202b99-d2c5-4819-9da6-a4bc2f99e98d\") " Oct 06 22:30:03 crc kubenswrapper[5014]: I1006 22:30:03.193958 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-config-volume\") pod \"9c202b99-d2c5-4819-9da6-a4bc2f99e98d\" (UID: \"9c202b99-d2c5-4819-9da6-a4bc2f99e98d\") " Oct 06 22:30:03 crc kubenswrapper[5014]: I1006 22:30:03.195438 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-config-volume" (OuterVolumeSpecName: "config-volume") pod "9c202b99-d2c5-4819-9da6-a4bc2f99e98d" (UID: "9c202b99-d2c5-4819-9da6-a4bc2f99e98d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:30:03 crc kubenswrapper[5014]: I1006 22:30:03.201128 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9c202b99-d2c5-4819-9da6-a4bc2f99e98d" (UID: "9c202b99-d2c5-4819-9da6-a4bc2f99e98d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 22:30:03 crc kubenswrapper[5014]: I1006 22:30:03.203823 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-kube-api-access-vh44z" (OuterVolumeSpecName: "kube-api-access-vh44z") pod "9c202b99-d2c5-4819-9da6-a4bc2f99e98d" (UID: "9c202b99-d2c5-4819-9da6-a4bc2f99e98d"). InnerVolumeSpecName "kube-api-access-vh44z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:30:03 crc kubenswrapper[5014]: I1006 22:30:03.295324 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vh44z\" (UniqueName: \"kubernetes.io/projected/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-kube-api-access-vh44z\") on node \"crc\" DevicePath \"\"" Oct 06 22:30:03 crc kubenswrapper[5014]: I1006 22:30:03.295360 5014 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 06 22:30:03 crc kubenswrapper[5014]: I1006 22:30:03.295369 5014 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9c202b99-d2c5-4819-9da6-a4bc2f99e98d-config-volume\") on node \"crc\" DevicePath \"\"" Oct 06 22:30:03 crc kubenswrapper[5014]: I1006 22:30:03.834042 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" event={"ID":"9c202b99-d2c5-4819-9da6-a4bc2f99e98d","Type":"ContainerDied","Data":"40ea1d1d0d9fdab227ceff434a193cde3dfbba7036410d39cd296e3734ce5c44"} Oct 06 22:30:03 crc kubenswrapper[5014]: I1006 22:30:03.834400 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40ea1d1d0d9fdab227ceff434a193cde3dfbba7036410d39cd296e3734ce5c44" Oct 06 22:30:03 crc kubenswrapper[5014]: I1006 22:30:03.834133 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329830-cws5b" Oct 06 22:30:04 crc kubenswrapper[5014]: I1006 22:30:04.246429 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44"] Oct 06 22:30:04 crc kubenswrapper[5014]: I1006 22:30:04.252880 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329785-6xg44"] Oct 06 22:30:05 crc kubenswrapper[5014]: I1006 22:30:05.500724 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e81f009f-1219-4bfc-8516-e68492370963" path="/var/lib/kubelet/pods/e81f009f-1219-4bfc-8516-e68492370963/volumes" Oct 06 22:30:21 crc kubenswrapper[5014]: I1006 22:30:21.735480 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:30:21 crc kubenswrapper[5014]: I1006 22:30:21.736182 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:30:51 crc kubenswrapper[5014]: I1006 22:30:51.735263 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:30:51 crc kubenswrapper[5014]: I1006 22:30:51.735904 5014 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:30:51 crc kubenswrapper[5014]: I1006 22:30:51.735965 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 22:30:51 crc kubenswrapper[5014]: I1006 22:30:51.736687 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"798de3444f7c40078c43c5e21f686ecd23ef225d8de60b2170e70ab24cc2022c"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 06 22:30:51 crc kubenswrapper[5014]: I1006 22:30:51.736780 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://798de3444f7c40078c43c5e21f686ecd23ef225d8de60b2170e70ab24cc2022c" gracePeriod=600 Oct 06 22:30:52 crc kubenswrapper[5014]: I1006 22:30:52.326304 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="798de3444f7c40078c43c5e21f686ecd23ef225d8de60b2170e70ab24cc2022c" exitCode=0 Oct 06 22:30:52 crc kubenswrapper[5014]: I1006 22:30:52.326367 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"798de3444f7c40078c43c5e21f686ecd23ef225d8de60b2170e70ab24cc2022c"} Oct 06 22:30:52 crc kubenswrapper[5014]: I1006 22:30:52.326737 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"} Oct 06 22:30:52 crc kubenswrapper[5014]: I1006 22:30:52.326767 5014 scope.go:117] "RemoveContainer" containerID="5a870d38852058796484925655d1a681ff93165903dc20d50d1b43d2036c382b" Oct 06 22:31:02 crc kubenswrapper[5014]: I1006 22:31:02.218661 5014 scope.go:117] "RemoveContainer" containerID="1010ad625e7f41bf996aecce90637779585df2f8e573b1ecb843256705457d18" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.512700 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ppbgc"] Oct 06 22:31:08 crc kubenswrapper[5014]: E1006 22:31:08.514180 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c202b99-d2c5-4819-9da6-a4bc2f99e98d" containerName="collect-profiles" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.514213 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c202b99-d2c5-4819-9da6-a4bc2f99e98d" containerName="collect-profiles" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.514586 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c202b99-d2c5-4819-9da6-a4bc2f99e98d" containerName="collect-profiles" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.517261 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ppbgc" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.532700 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ppbgc"] Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.585494 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10110c8d-ea85-462c-af4e-f4d78009e677-utilities\") pod \"redhat-operators-ppbgc\" (UID: \"10110c8d-ea85-462c-af4e-f4d78009e677\") " pod="openshift-marketplace/redhat-operators-ppbgc" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.585749 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84bsx\" (UniqueName: \"kubernetes.io/projected/10110c8d-ea85-462c-af4e-f4d78009e677-kube-api-access-84bsx\") pod \"redhat-operators-ppbgc\" (UID: \"10110c8d-ea85-462c-af4e-f4d78009e677\") " pod="openshift-marketplace/redhat-operators-ppbgc" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.585790 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10110c8d-ea85-462c-af4e-f4d78009e677-catalog-content\") pod \"redhat-operators-ppbgc\" (UID: \"10110c8d-ea85-462c-af4e-f4d78009e677\") " pod="openshift-marketplace/redhat-operators-ppbgc" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.687880 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10110c8d-ea85-462c-af4e-f4d78009e677-utilities\") pod \"redhat-operators-ppbgc\" (UID: \"10110c8d-ea85-462c-af4e-f4d78009e677\") " pod="openshift-marketplace/redhat-operators-ppbgc" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.687995 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84bsx\" (UniqueName: \"kubernetes.io/projected/10110c8d-ea85-462c-af4e-f4d78009e677-kube-api-access-84bsx\") pod \"redhat-operators-ppbgc\" (UID: \"10110c8d-ea85-462c-af4e-f4d78009e677\") " pod="openshift-marketplace/redhat-operators-ppbgc" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.688023 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10110c8d-ea85-462c-af4e-f4d78009e677-catalog-content\") pod \"redhat-operators-ppbgc\" (UID: \"10110c8d-ea85-462c-af4e-f4d78009e677\") " pod="openshift-marketplace/redhat-operators-ppbgc" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.688808 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10110c8d-ea85-462c-af4e-f4d78009e677-utilities\") pod \"redhat-operators-ppbgc\" (UID: \"10110c8d-ea85-462c-af4e-f4d78009e677\") " pod="openshift-marketplace/redhat-operators-ppbgc" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.688822 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10110c8d-ea85-462c-af4e-f4d78009e677-catalog-content\") pod \"redhat-operators-ppbgc\" (UID: \"10110c8d-ea85-462c-af4e-f4d78009e677\") " pod="openshift-marketplace/redhat-operators-ppbgc" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.698047 5014 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/community-operators-fb6vx"] Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.699609 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fb6vx" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.721215 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fb6vx"] Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.721958 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84bsx\" (UniqueName: \"kubernetes.io/projected/10110c8d-ea85-462c-af4e-f4d78009e677-kube-api-access-84bsx\") pod \"redhat-operators-ppbgc\" (UID: \"10110c8d-ea85-462c-af4e-f4d78009e677\") " pod="openshift-marketplace/redhat-operators-ppbgc" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.788886 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32a58e7e-01a4-4991-98fb-724ef4fb4536-utilities\") pod \"community-operators-fb6vx\" (UID: \"32a58e7e-01a4-4991-98fb-724ef4fb4536\") " pod="openshift-marketplace/community-operators-fb6vx" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.788947 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32a58e7e-01a4-4991-98fb-724ef4fb4536-catalog-content\") pod \"community-operators-fb6vx\" (UID: \"32a58e7e-01a4-4991-98fb-724ef4fb4536\") " pod="openshift-marketplace/community-operators-fb6vx" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.788982 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thvms\" (UniqueName: \"kubernetes.io/projected/32a58e7e-01a4-4991-98fb-724ef4fb4536-kube-api-access-thvms\") pod \"community-operators-fb6vx\" (UID: \"32a58e7e-01a4-4991-98fb-724ef4fb4536\") " pod="openshift-marketplace/community-operators-fb6vx" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.850960 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ppbgc" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.889930 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32a58e7e-01a4-4991-98fb-724ef4fb4536-utilities\") pod \"community-operators-fb6vx\" (UID: \"32a58e7e-01a4-4991-98fb-724ef4fb4536\") " pod="openshift-marketplace/community-operators-fb6vx" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.890259 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32a58e7e-01a4-4991-98fb-724ef4fb4536-catalog-content\") pod \"community-operators-fb6vx\" (UID: \"32a58e7e-01a4-4991-98fb-724ef4fb4536\") " pod="openshift-marketplace/community-operators-fb6vx" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.890427 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thvms\" (UniqueName: \"kubernetes.io/projected/32a58e7e-01a4-4991-98fb-724ef4fb4536-kube-api-access-thvms\") pod \"community-operators-fb6vx\" (UID: \"32a58e7e-01a4-4991-98fb-724ef4fb4536\") " pod="openshift-marketplace/community-operators-fb6vx" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.890538 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32a58e7e-01a4-4991-98fb-724ef4fb4536-utilities\") pod \"community-operators-fb6vx\" (UID: \"32a58e7e-01a4-4991-98fb-724ef4fb4536\") " pod="openshift-marketplace/community-operators-fb6vx" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.890867 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32a58e7e-01a4-4991-98fb-724ef4fb4536-catalog-content\") pod \"community-operators-fb6vx\" (UID: \"32a58e7e-01a4-4991-98fb-724ef4fb4536\") " pod="openshift-marketplace/community-operators-fb6vx" Oct 06 22:31:08 crc kubenswrapper[5014]: I1006 22:31:08.911407 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thvms\" (UniqueName: \"kubernetes.io/projected/32a58e7e-01a4-4991-98fb-724ef4fb4536-kube-api-access-thvms\") pod \"community-operators-fb6vx\" (UID: \"32a58e7e-01a4-4991-98fb-724ef4fb4536\") " pod="openshift-marketplace/community-operators-fb6vx" Oct 06 22:31:09 crc kubenswrapper[5014]: I1006 22:31:09.024967 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fb6vx" Oct 06 22:31:09 crc kubenswrapper[5014]: I1006 22:31:09.387137 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ppbgc"] Oct 06 22:31:09 crc kubenswrapper[5014]: I1006 22:31:09.518081 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ppbgc" event={"ID":"10110c8d-ea85-462c-af4e-f4d78009e677","Type":"ContainerStarted","Data":"8eb843b7eeb6eaf76819e6d9de94bdbea158bdbb46c1fc31ec06eee3b2f1352e"} Oct 06 22:31:09 crc kubenswrapper[5014]: I1006 22:31:09.605305 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fb6vx"] Oct 06 22:31:09 crc kubenswrapper[5014]: W1006 22:31:09.611556 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod32a58e7e_01a4_4991_98fb_724ef4fb4536.slice/crio-6bb46bd164d0baf285d52e5ca17249c103a2cfa48872920fcee658f1d58a84f5 WatchSource:0}: Error finding container 6bb46bd164d0baf285d52e5ca17249c103a2cfa48872920fcee658f1d58a84f5: Status 404 returned error can't find the container with id 6bb46bd164d0baf285d52e5ca17249c103a2cfa48872920fcee658f1d58a84f5 Oct 06 22:31:10 crc kubenswrapper[5014]: I1006 22:31:10.532026 5014 generic.go:334] "Generic (PLEG): container finished" podID="32a58e7e-01a4-4991-98fb-724ef4fb4536" containerID="0baa5efe5a6b6cd471f692951fae7a5e91765b5130668327034373bd1110ec9d" exitCode=0 Oct 06 22:31:10 crc kubenswrapper[5014]: I1006 22:31:10.532171 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fb6vx" event={"ID":"32a58e7e-01a4-4991-98fb-724ef4fb4536","Type":"ContainerDied","Data":"0baa5efe5a6b6cd471f692951fae7a5e91765b5130668327034373bd1110ec9d"} Oct 06 22:31:10 crc kubenswrapper[5014]: I1006 22:31:10.532775 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fb6vx" event={"ID":"32a58e7e-01a4-4991-98fb-724ef4fb4536","Type":"ContainerStarted","Data":"6bb46bd164d0baf285d52e5ca17249c103a2cfa48872920fcee658f1d58a84f5"} Oct 06 22:31:10 crc kubenswrapper[5014]: I1006 22:31:10.535869 5014 generic.go:334] "Generic (PLEG): container finished" podID="10110c8d-ea85-462c-af4e-f4d78009e677" containerID="4d263e928a3097235a9a4b4810a5a4432ed74e1b5986eac128a76eb6ee614973" exitCode=0 Oct 06 22:31:10 crc kubenswrapper[5014]: I1006 22:31:10.535931 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ppbgc" event={"ID":"10110c8d-ea85-462c-af4e-f4d78009e677","Type":"ContainerDied","Data":"4d263e928a3097235a9a4b4810a5a4432ed74e1b5986eac128a76eb6ee614973"} Oct 06 22:31:11 crc kubenswrapper[5014]: I1006 22:31:11.549367 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fb6vx" event={"ID":"32a58e7e-01a4-4991-98fb-724ef4fb4536","Type":"ContainerStarted","Data":"a10b3880ab94719a9cecbe83961920cdd69e09d8cf3ab67a8546017a9902e1dc"} Oct 06 22:31:11 crc kubenswrapper[5014]: I1006 22:31:11.554948 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ppbgc" event={"ID":"10110c8d-ea85-462c-af4e-f4d78009e677","Type":"ContainerStarted","Data":"8aca175700b4ba2306ea109658e283cfb93b5dedabc765526eaa26ef7a2abc55"} Oct 06 22:31:12 crc kubenswrapper[5014]: I1006 22:31:12.587686 5014 generic.go:334] "Generic (PLEG): container finished" 
podID="10110c8d-ea85-462c-af4e-f4d78009e677" containerID="8aca175700b4ba2306ea109658e283cfb93b5dedabc765526eaa26ef7a2abc55" exitCode=0
Oct 06 22:31:12 crc kubenswrapper[5014]: I1006 22:31:12.588008 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ppbgc" event={"ID":"10110c8d-ea85-462c-af4e-f4d78009e677","Type":"ContainerDied","Data":"8aca175700b4ba2306ea109658e283cfb93b5dedabc765526eaa26ef7a2abc55"}
Oct 06 22:31:12 crc kubenswrapper[5014]: I1006 22:31:12.591581 5014 generic.go:334] "Generic (PLEG): container finished" podID="32a58e7e-01a4-4991-98fb-724ef4fb4536" containerID="a10b3880ab94719a9cecbe83961920cdd69e09d8cf3ab67a8546017a9902e1dc" exitCode=0
Oct 06 22:31:12 crc kubenswrapper[5014]: I1006 22:31:12.591876 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fb6vx" event={"ID":"32a58e7e-01a4-4991-98fb-724ef4fb4536","Type":"ContainerDied","Data":"a10b3880ab94719a9cecbe83961920cdd69e09d8cf3ab67a8546017a9902e1dc"}
Oct 06 22:31:13 crc kubenswrapper[5014]: I1006 22:31:13.615247 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fb6vx" event={"ID":"32a58e7e-01a4-4991-98fb-724ef4fb4536","Type":"ContainerStarted","Data":"a5597d356da4c4dc70dc99fbe5c7bc4bd67050571b8aeb436e07ee0fc79ff4b7"}
Oct 06 22:31:13 crc kubenswrapper[5014]: I1006 22:31:13.620665 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ppbgc" event={"ID":"10110c8d-ea85-462c-af4e-f4d78009e677","Type":"ContainerStarted","Data":"32c8487cc66b1ddc67671b1f4cede0d8a157c60cc6b28b92a5caf2ab417fce1e"}
Oct 06 22:31:13 crc kubenswrapper[5014]: I1006 22:31:13.641827 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fb6vx" podStartSLOduration=3.102021501 podStartE2EDuration="5.641804801s" podCreationTimestamp="2025-10-06 22:31:08 +0000 UTC" firstStartedPulling="2025-10-06 22:31:10.535876389 +0000 UTC m=+3615.828913163" lastFinishedPulling="2025-10-06 22:31:13.075659689 +0000 UTC m=+3618.368696463" observedRunningTime="2025-10-06 22:31:13.634943945 +0000 UTC m=+3618.927980719" watchObservedRunningTime="2025-10-06 22:31:13.641804801 +0000 UTC m=+3618.934841575"
Oct 06 22:31:13 crc kubenswrapper[5014]: I1006 22:31:13.660821 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ppbgc" podStartSLOduration=3.116011521 podStartE2EDuration="5.660802938s" podCreationTimestamp="2025-10-06 22:31:08 +0000 UTC" firstStartedPulling="2025-10-06 22:31:10.540452093 +0000 UTC m=+3615.833488857" lastFinishedPulling="2025-10-06 22:31:13.085185429 +0000 UTC m=+3618.378280274" observedRunningTime="2025-10-06 22:31:13.658035642 +0000 UTC m=+3618.951072386" watchObservedRunningTime="2025-10-06 22:31:13.660802938 +0000 UTC m=+3618.953839682"
Oct 06 22:31:18 crc kubenswrapper[5014]: I1006 22:31:18.851737 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ppbgc"
Oct 06 22:31:18 crc kubenswrapper[5014]: I1006 22:31:18.852574 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ppbgc"
Oct 06 22:31:19 crc kubenswrapper[5014]: I1006 22:31:19.026421 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fb6vx"
Oct 06 22:31:19 crc kubenswrapper[5014]: I1006 22:31:19.026502 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fb6vx"
Oct 06 22:31:19 crc kubenswrapper[5014]: I1006 22:31:19.113333 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fb6vx"
Oct 06 22:31:19 crc kubenswrapper[5014]: I1006 22:31:19.751414 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fb6vx"
Oct 06 22:31:19 crc kubenswrapper[5014]: I1006 22:31:19.822434 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fb6vx"]
Oct 06 22:31:19 crc kubenswrapper[5014]: I1006 22:31:19.934460 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ppbgc" podUID="10110c8d-ea85-462c-af4e-f4d78009e677" containerName="registry-server" probeResult="failure" output=<
Oct 06 22:31:19 crc kubenswrapper[5014]: timeout: failed to connect service ":50051" within 1s
Oct 06 22:31:19 crc kubenswrapper[5014]: >
Oct 06 22:31:21 crc kubenswrapper[5014]: I1006 22:31:21.701443 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fb6vx" podUID="32a58e7e-01a4-4991-98fb-724ef4fb4536" containerName="registry-server" containerID="cri-o://a5597d356da4c4dc70dc99fbe5c7bc4bd67050571b8aeb436e07ee0fc79ff4b7" gracePeriod=2
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.232486 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fb6vx"
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.343008 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32a58e7e-01a4-4991-98fb-724ef4fb4536-utilities\") pod \"32a58e7e-01a4-4991-98fb-724ef4fb4536\" (UID: \"32a58e7e-01a4-4991-98fb-724ef4fb4536\") "
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.343105 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thvms\" (UniqueName: \"kubernetes.io/projected/32a58e7e-01a4-4991-98fb-724ef4fb4536-kube-api-access-thvms\") pod \"32a58e7e-01a4-4991-98fb-724ef4fb4536\" (UID: \"32a58e7e-01a4-4991-98fb-724ef4fb4536\") "
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.343144 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32a58e7e-01a4-4991-98fb-724ef4fb4536-catalog-content\") pod \"32a58e7e-01a4-4991-98fb-724ef4fb4536\" (UID: \"32a58e7e-01a4-4991-98fb-724ef4fb4536\") "
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.344506 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32a58e7e-01a4-4991-98fb-724ef4fb4536-utilities" (OuterVolumeSpecName: "utilities") pod "32a58e7e-01a4-4991-98fb-724ef4fb4536" (UID: "32a58e7e-01a4-4991-98fb-724ef4fb4536"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.349217 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32a58e7e-01a4-4991-98fb-724ef4fb4536-kube-api-access-thvms" (OuterVolumeSpecName: "kube-api-access-thvms") pod "32a58e7e-01a4-4991-98fb-724ef4fb4536" (UID: "32a58e7e-01a4-4991-98fb-724ef4fb4536"). InnerVolumeSpecName "kube-api-access-thvms". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.444838 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32a58e7e-01a4-4991-98fb-724ef4fb4536-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.444915 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thvms\" (UniqueName: \"kubernetes.io/projected/32a58e7e-01a4-4991-98fb-724ef4fb4536-kube-api-access-thvms\") on node \"crc\" DevicePath \"\""
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.632212 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32a58e7e-01a4-4991-98fb-724ef4fb4536-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "32a58e7e-01a4-4991-98fb-724ef4fb4536" (UID: "32a58e7e-01a4-4991-98fb-724ef4fb4536"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.647917 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32a58e7e-01a4-4991-98fb-724ef4fb4536-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.718152 5014 generic.go:334] "Generic (PLEG): container finished" podID="32a58e7e-01a4-4991-98fb-724ef4fb4536" containerID="a5597d356da4c4dc70dc99fbe5c7bc4bd67050571b8aeb436e07ee0fc79ff4b7" exitCode=0
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.718242 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fb6vx" event={"ID":"32a58e7e-01a4-4991-98fb-724ef4fb4536","Type":"ContainerDied","Data":"a5597d356da4c4dc70dc99fbe5c7bc4bd67050571b8aeb436e07ee0fc79ff4b7"}
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.718314 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fb6vx" event={"ID":"32a58e7e-01a4-4991-98fb-724ef4fb4536","Type":"ContainerDied","Data":"6bb46bd164d0baf285d52e5ca17249c103a2cfa48872920fcee658f1d58a84f5"}
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.718327 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fb6vx"
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.718348 5014 scope.go:117] "RemoveContainer" containerID="a5597d356da4c4dc70dc99fbe5c7bc4bd67050571b8aeb436e07ee0fc79ff4b7"
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.750561 5014 scope.go:117] "RemoveContainer" containerID="a10b3880ab94719a9cecbe83961920cdd69e09d8cf3ab67a8546017a9902e1dc"
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.776889 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fb6vx"]
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.787137 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fb6vx"]
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.807109 5014 scope.go:117] "RemoveContainer" containerID="0baa5efe5a6b6cd471f692951fae7a5e91765b5130668327034373bd1110ec9d"
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.834828 5014 scope.go:117] "RemoveContainer" containerID="a5597d356da4c4dc70dc99fbe5c7bc4bd67050571b8aeb436e07ee0fc79ff4b7"
Oct 06 22:31:22 crc kubenswrapper[5014]: E1006 22:31:22.835486 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5597d356da4c4dc70dc99fbe5c7bc4bd67050571b8aeb436e07ee0fc79ff4b7\": container with ID starting with a5597d356da4c4dc70dc99fbe5c7bc4bd67050571b8aeb436e07ee0fc79ff4b7 not found: ID does not exist" containerID="a5597d356da4c4dc70dc99fbe5c7bc4bd67050571b8aeb436e07ee0fc79ff4b7"
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.835561 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5597d356da4c4dc70dc99fbe5c7bc4bd67050571b8aeb436e07ee0fc79ff4b7"} err="failed to get container status \"a5597d356da4c4dc70dc99fbe5c7bc4bd67050571b8aeb436e07ee0fc79ff4b7\": rpc error: code = NotFound desc = could not find container \"a5597d356da4c4dc70dc99fbe5c7bc4bd67050571b8aeb436e07ee0fc79ff4b7\": container with ID starting with a5597d356da4c4dc70dc99fbe5c7bc4bd67050571b8aeb436e07ee0fc79ff4b7 not found: ID does not exist"
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.835611 5014 scope.go:117] "RemoveContainer" containerID="a10b3880ab94719a9cecbe83961920cdd69e09d8cf3ab67a8546017a9902e1dc"
Oct 06 22:31:22 crc kubenswrapper[5014]: E1006 22:31:22.836283 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a10b3880ab94719a9cecbe83961920cdd69e09d8cf3ab67a8546017a9902e1dc\": container with ID starting with a10b3880ab94719a9cecbe83961920cdd69e09d8cf3ab67a8546017a9902e1dc not found: ID does not exist" containerID="a10b3880ab94719a9cecbe83961920cdd69e09d8cf3ab67a8546017a9902e1dc"
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.836366 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a10b3880ab94719a9cecbe83961920cdd69e09d8cf3ab67a8546017a9902e1dc"} err="failed to get container status \"a10b3880ab94719a9cecbe83961920cdd69e09d8cf3ab67a8546017a9902e1dc\": rpc error: code = NotFound desc = could not find container \"a10b3880ab94719a9cecbe83961920cdd69e09d8cf3ab67a8546017a9902e1dc\": container with ID starting with a10b3880ab94719a9cecbe83961920cdd69e09d8cf3ab67a8546017a9902e1dc not found: ID does not exist"
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.836434 5014 scope.go:117] "RemoveContainer" containerID="0baa5efe5a6b6cd471f692951fae7a5e91765b5130668327034373bd1110ec9d"
Oct 06 22:31:22 crc kubenswrapper[5014]: E1006 22:31:22.837156 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0baa5efe5a6b6cd471f692951fae7a5e91765b5130668327034373bd1110ec9d\": container with ID starting with 0baa5efe5a6b6cd471f692951fae7a5e91765b5130668327034373bd1110ec9d not found: ID does not exist" containerID="0baa5efe5a6b6cd471f692951fae7a5e91765b5130668327034373bd1110ec9d"
Oct 06 22:31:22 crc kubenswrapper[5014]: I1006 22:31:22.837207 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0baa5efe5a6b6cd471f692951fae7a5e91765b5130668327034373bd1110ec9d"} err="failed to get container status \"0baa5efe5a6b6cd471f692951fae7a5e91765b5130668327034373bd1110ec9d\": rpc error: code = NotFound desc = could not find container \"0baa5efe5a6b6cd471f692951fae7a5e91765b5130668327034373bd1110ec9d\": container with ID starting with 0baa5efe5a6b6cd471f692951fae7a5e91765b5130668327034373bd1110ec9d not found: ID does not exist"
Oct 06 22:31:23 crc kubenswrapper[5014]: I1006 22:31:23.500942 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32a58e7e-01a4-4991-98fb-724ef4fb4536" path="/var/lib/kubelet/pods/32a58e7e-01a4-4991-98fb-724ef4fb4536/volumes"
Oct 06 22:31:28 crc kubenswrapper[5014]: I1006 22:31:28.929483 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ppbgc"
Oct 06 22:31:29 crc kubenswrapper[5014]: I1006 22:31:29.008158 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ppbgc"
Oct 06 22:31:29 crc kubenswrapper[5014]: I1006 22:31:29.179541 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ppbgc"]
Oct 06 22:31:30 crc kubenswrapper[5014]: I1006 22:31:30.807461 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ppbgc" podUID="10110c8d-ea85-462c-af4e-f4d78009e677" containerName="registry-server" containerID="cri-o://32c8487cc66b1ddc67671b1f4cede0d8a157c60cc6b28b92a5caf2ab417fce1e" gracePeriod=2
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.298421 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ppbgc"
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.488648 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10110c8d-ea85-462c-af4e-f4d78009e677-utilities\") pod \"10110c8d-ea85-462c-af4e-f4d78009e677\" (UID: \"10110c8d-ea85-462c-af4e-f4d78009e677\") "
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.488710 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84bsx\" (UniqueName: \"kubernetes.io/projected/10110c8d-ea85-462c-af4e-f4d78009e677-kube-api-access-84bsx\") pod \"10110c8d-ea85-462c-af4e-f4d78009e677\" (UID: \"10110c8d-ea85-462c-af4e-f4d78009e677\") "
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.488753 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10110c8d-ea85-462c-af4e-f4d78009e677-catalog-content\") pod \"10110c8d-ea85-462c-af4e-f4d78009e677\" (UID: \"10110c8d-ea85-462c-af4e-f4d78009e677\") "
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.490671 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10110c8d-ea85-462c-af4e-f4d78009e677-utilities" (OuterVolumeSpecName: "utilities") pod "10110c8d-ea85-462c-af4e-f4d78009e677" (UID: "10110c8d-ea85-462c-af4e-f4d78009e677"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.502005 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10110c8d-ea85-462c-af4e-f4d78009e677-kube-api-access-84bsx" (OuterVolumeSpecName: "kube-api-access-84bsx") pod "10110c8d-ea85-462c-af4e-f4d78009e677" (UID: "10110c8d-ea85-462c-af4e-f4d78009e677"). InnerVolumeSpecName "kube-api-access-84bsx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.593019 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10110c8d-ea85-462c-af4e-f4d78009e677-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.593070 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84bsx\" (UniqueName: \"kubernetes.io/projected/10110c8d-ea85-462c-af4e-f4d78009e677-kube-api-access-84bsx\") on node \"crc\" DevicePath \"\""
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.637209 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10110c8d-ea85-462c-af4e-f4d78009e677-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "10110c8d-ea85-462c-af4e-f4d78009e677" (UID: "10110c8d-ea85-462c-af4e-f4d78009e677"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.694477 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10110c8d-ea85-462c-af4e-f4d78009e677-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.820270 5014 generic.go:334] "Generic (PLEG): container finished" podID="10110c8d-ea85-462c-af4e-f4d78009e677" containerID="32c8487cc66b1ddc67671b1f4cede0d8a157c60cc6b28b92a5caf2ab417fce1e" exitCode=0
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.820289 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ppbgc"
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.820328 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ppbgc" event={"ID":"10110c8d-ea85-462c-af4e-f4d78009e677","Type":"ContainerDied","Data":"32c8487cc66b1ddc67671b1f4cede0d8a157c60cc6b28b92a5caf2ab417fce1e"}
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.820513 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ppbgc" event={"ID":"10110c8d-ea85-462c-af4e-f4d78009e677","Type":"ContainerDied","Data":"8eb843b7eeb6eaf76819e6d9de94bdbea158bdbb46c1fc31ec06eee3b2f1352e"}
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.820668 5014 scope.go:117] "RemoveContainer" containerID="32c8487cc66b1ddc67671b1f4cede0d8a157c60cc6b28b92a5caf2ab417fce1e"
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.845005 5014 scope.go:117] "RemoveContainer" containerID="8aca175700b4ba2306ea109658e283cfb93b5dedabc765526eaa26ef7a2abc55"
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.876875 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ppbgc"]
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.882398 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ppbgc"]
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.884094 5014 scope.go:117] "RemoveContainer" containerID="4d263e928a3097235a9a4b4810a5a4432ed74e1b5986eac128a76eb6ee614973"
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.912216 5014 scope.go:117] "RemoveContainer" containerID="32c8487cc66b1ddc67671b1f4cede0d8a157c60cc6b28b92a5caf2ab417fce1e"
Oct 06 22:31:31 crc kubenswrapper[5014]: E1006 22:31:31.912873 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32c8487cc66b1ddc67671b1f4cede0d8a157c60cc6b28b92a5caf2ab417fce1e\": container with ID starting with 32c8487cc66b1ddc67671b1f4cede0d8a157c60cc6b28b92a5caf2ab417fce1e not found: ID does not exist" containerID="32c8487cc66b1ddc67671b1f4cede0d8a157c60cc6b28b92a5caf2ab417fce1e"
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.912975 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32c8487cc66b1ddc67671b1f4cede0d8a157c60cc6b28b92a5caf2ab417fce1e"} err="failed to get container status \"32c8487cc66b1ddc67671b1f4cede0d8a157c60cc6b28b92a5caf2ab417fce1e\": rpc error: code = NotFound desc = could not find container \"32c8487cc66b1ddc67671b1f4cede0d8a157c60cc6b28b92a5caf2ab417fce1e\": container with ID starting with 32c8487cc66b1ddc67671b1f4cede0d8a157c60cc6b28b92a5caf2ab417fce1e not found: ID does not exist"
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.913070 5014 scope.go:117] "RemoveContainer" containerID="8aca175700b4ba2306ea109658e283cfb93b5dedabc765526eaa26ef7a2abc55"
Oct 06 22:31:31 crc kubenswrapper[5014]: E1006 22:31:31.913544 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8aca175700b4ba2306ea109658e283cfb93b5dedabc765526eaa26ef7a2abc55\": container with ID starting with 8aca175700b4ba2306ea109658e283cfb93b5dedabc765526eaa26ef7a2abc55 not found: ID does not exist" containerID="8aca175700b4ba2306ea109658e283cfb93b5dedabc765526eaa26ef7a2abc55"
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.913662 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8aca175700b4ba2306ea109658e283cfb93b5dedabc765526eaa26ef7a2abc55"} err="failed to get container status \"8aca175700b4ba2306ea109658e283cfb93b5dedabc765526eaa26ef7a2abc55\": rpc error: code = NotFound desc = could not find container \"8aca175700b4ba2306ea109658e283cfb93b5dedabc765526eaa26ef7a2abc55\": container with ID starting with 8aca175700b4ba2306ea109658e283cfb93b5dedabc765526eaa26ef7a2abc55 not found: ID does not exist"
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.913728 5014 scope.go:117] "RemoveContainer" containerID="4d263e928a3097235a9a4b4810a5a4432ed74e1b5986eac128a76eb6ee614973"
Oct 06 22:31:31 crc kubenswrapper[5014]: E1006 22:31:31.914276 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d263e928a3097235a9a4b4810a5a4432ed74e1b5986eac128a76eb6ee614973\": container with ID starting with 4d263e928a3097235a9a4b4810a5a4432ed74e1b5986eac128a76eb6ee614973 not found: ID does not exist" containerID="4d263e928a3097235a9a4b4810a5a4432ed74e1b5986eac128a76eb6ee614973"
Oct 06 22:31:31 crc kubenswrapper[5014]: I1006 22:31:31.914343 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d263e928a3097235a9a4b4810a5a4432ed74e1b5986eac128a76eb6ee614973"} err="failed to get container status \"4d263e928a3097235a9a4b4810a5a4432ed74e1b5986eac128a76eb6ee614973\": rpc error: code = NotFound desc = could not find container \"4d263e928a3097235a9a4b4810a5a4432ed74e1b5986eac128a76eb6ee614973\": container with ID starting with 4d263e928a3097235a9a4b4810a5a4432ed74e1b5986eac128a76eb6ee614973 not found: ID does not exist"
Oct 06 22:31:31 crc kubenswrapper[5014]: E1006 22:31:31.984574 5014 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod10110c8d_ea85_462c_af4e_f4d78009e677.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod10110c8d_ea85_462c_af4e_f4d78009e677.slice/crio-8eb843b7eeb6eaf76819e6d9de94bdbea158bdbb46c1fc31ec06eee3b2f1352e\": RecentStats: unable to find data in memory cache]"
Oct 06 22:31:33 crc kubenswrapper[5014]: I1006 22:31:33.504943 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10110c8d-ea85-462c-af4e-f4d78009e677" path="/var/lib/kubelet/pods/10110c8d-ea85-462c-af4e-f4d78009e677/volumes"
Oct 06 22:33:21 crc kubenswrapper[5014]: I1006 22:33:21.734974 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 22:33:21 crc kubenswrapper[5014]: I1006 22:33:21.735801 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 22:33:51 crc kubenswrapper[5014]: I1006 22:33:51.735337 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 22:33:51 crc kubenswrapper[5014]: I1006 22:33:51.736407 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 22:34:21 crc kubenswrapper[5014]: I1006 22:34:21.735540 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 22:34:21 crc kubenswrapper[5014]: I1006 22:34:21.736581 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 22:34:21 crc kubenswrapper[5014]: I1006 22:34:21.736708 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths"
Oct 06 22:34:21 crc kubenswrapper[5014]: I1006 22:34:21.737995 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 06 22:34:21 crc kubenswrapper[5014]: I1006 22:34:21.738328 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416" gracePeriod=600
Oct 06 22:34:21 crc kubenswrapper[5014]: E1006 22:34:21.873675 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:34:22 crc kubenswrapper[5014]: I1006 22:34:22.485939 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416" exitCode=0
Oct 06 22:34:22 crc kubenswrapper[5014]: I1006 22:34:22.486019 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"}
Oct 06 22:34:22 crc kubenswrapper[5014]: I1006 22:34:22.486062 5014 scope.go:117] "RemoveContainer" containerID="798de3444f7c40078c43c5e21f686ecd23ef225d8de60b2170e70ab24cc2022c"
Oct 06 22:34:22 crc kubenswrapper[5014]: I1006 22:34:22.486597 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:34:22 crc kubenswrapper[5014]: E1006 22:34:22.487047 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:34:34 crc kubenswrapper[5014]: I1006 22:34:34.484942 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:34:34 crc kubenswrapper[5014]: E1006 22:34:34.486097 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:34:45 crc kubenswrapper[5014]: I1006 22:34:45.492781 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:34:45 crc kubenswrapper[5014]: E1006 22:34:45.494047 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:34:56 crc kubenswrapper[5014]: I1006 22:34:56.485146 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:34:56 crc kubenswrapper[5014]: E1006 22:34:56.488360 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:35:09 crc kubenswrapper[5014]: I1006 22:35:09.499213 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:35:09 crc kubenswrapper[5014]: E1006 22:35:09.502130 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:35:22 crc kubenswrapper[5014]: I1006 22:35:22.484660 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:35:22 crc kubenswrapper[5014]: E1006 22:35:22.485875 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:35:33 crc kubenswrapper[5014]: I1006 22:35:33.485040 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:35:33 crc kubenswrapper[5014]: E1006 22:35:33.486152 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:35:45 crc kubenswrapper[5014]: I1006 22:35:45.493914 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:35:45 crc kubenswrapper[5014]: E1006 22:35:45.495162 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:35:59 crc kubenswrapper[5014]: I1006 22:35:59.485078 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:35:59 crc kubenswrapper[5014]: E1006 22:35:59.486190 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:36:10 crc kubenswrapper[5014]: I1006 22:36:10.484011 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:36:10 crc kubenswrapper[5014]: E1006 22:36:10.484841 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:36:22 crc kubenswrapper[5014]: I1006 22:36:22.484903 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:36:22 crc kubenswrapper[5014]: E1006 22:36:22.487921 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:36:34 crc kubenswrapper[5014]: I1006 22:36:34.484822 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:36:34 crc kubenswrapper[5014]: E1006 22:36:34.486039 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:36:46 crc kubenswrapper[5014]: I1006 22:36:46.485225 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:36:46 crc kubenswrapper[5014]: E1006 22:36:46.486345 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:36:59 crc kubenswrapper[5014]: I1006 22:36:59.485177 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:36:59 crc kubenswrapper[5014]: E1006 22:36:59.485961 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:37:10 crc kubenswrapper[5014]: I1006 22:37:10.484978 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:37:10 crc kubenswrapper[5014]: E1006 22:37:10.488140 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:37:23 crc kubenswrapper[5014]: I1006 22:37:23.487718 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:37:23 crc kubenswrapper[5014]: E1006 22:37:23.488711 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:37:36 crc kubenswrapper[5014]: I1006 22:37:36.484150 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:37:36 crc kubenswrapper[5014]: E1006 22:37:36.485848 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:37:49 crc kubenswrapper[5014]: I1006 22:37:49.486032 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:37:49 crc kubenswrapper[5014]: E1006 22:37:49.487246 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:38:00 crc kubenswrapper[5014]: I1006 22:38:00.484680 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:38:00 crc kubenswrapper[5014]: E1006 22:38:00.485986 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:38:13 crc kubenswrapper[5014]: I1006 22:38:13.486562 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:38:13 crc kubenswrapper[5014]: E1006 22:38:13.487447 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:38:26 crc kubenswrapper[5014]: I1006 22:38:26.485287 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:38:26 crc kubenswrapper[5014]: E1006 22:38:26.486534 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:38:38 crc kubenswrapper[5014]: I1006 22:38:38.485269 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:38:38 crc kubenswrapper[5014]: E1006 22:38:38.486383 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:38:52 crc kubenswrapper[5014]: I1006 22:38:52.485712 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:38:52 crc kubenswrapper[5014]: E1006 22:38:52.486301 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:39:03 crc kubenswrapper[5014]: I1006 22:39:03.484229 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:39:03 crc kubenswrapper[5014]: E1006 22:39:03.485491 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:39:16 crc kubenswrapper[5014]: I1006 22:39:16.486160 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:39:16 crc kubenswrapper[5014]: E1006 22:39:16.487042 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:39:28 crc kubenswrapper[5014]: I1006 22:39:28.484505 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:39:29 crc kubenswrapper[5014]: I1006 22:39:29.497857 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"e797c99f3d7250575c0fe41e7b21e8ec126de5e18dd0bbd484c427d2d4fa6e36"}
Oct 06 22:41:51 crc kubenswrapper[5014]: I1006 22:41:51.735058 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 22:41:51 crc kubenswrapper[5014]: I1006 22:41:51.736086 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.102756 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-czlvh"]
Oct 06 22:41:55 crc kubenswrapper[5014]: E1006 22:41:55.103554 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10110c8d-ea85-462c-af4e-f4d78009e677" containerName="extract-utilities"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.103575 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="10110c8d-ea85-462c-af4e-f4d78009e677" containerName="extract-utilities"
Oct 06 22:41:55 crc kubenswrapper[5014]: E1006 22:41:55.103612 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10110c8d-ea85-462c-af4e-f4d78009e677" containerName="registry-server"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.103650 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="10110c8d-ea85-462c-af4e-f4d78009e677" containerName="registry-server"
Oct 06 22:41:55 crc kubenswrapper[5014]: E1006 22:41:55.103673 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32a58e7e-01a4-4991-98fb-724ef4fb4536" containerName="registry-server"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.103686 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="32a58e7e-01a4-4991-98fb-724ef4fb4536" containerName="registry-server"
Oct 06 22:41:55 crc kubenswrapper[5014]: E1006 22:41:55.103707 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32a58e7e-01a4-4991-98fb-724ef4fb4536" containerName="extract-utilities"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.103718 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="32a58e7e-01a4-4991-98fb-724ef4fb4536" containerName="extract-utilities"
Oct 06 22:41:55 crc kubenswrapper[5014]: E1006 22:41:55.103749 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32a58e7e-01a4-4991-98fb-724ef4fb4536" containerName="extract-content"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.103761 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="32a58e7e-01a4-4991-98fb-724ef4fb4536" containerName="extract-content"
Oct 06 22:41:55 crc kubenswrapper[5014]: E1006 22:41:55.103785 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10110c8d-ea85-462c-af4e-f4d78009e677" containerName="extract-content"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.103797 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="10110c8d-ea85-462c-af4e-f4d78009e677" containerName="extract-content"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.104282 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="32a58e7e-01a4-4991-98fb-724ef4fb4536" containerName="registry-server"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.104315 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="10110c8d-ea85-462c-af4e-f4d78009e677" containerName="registry-server"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.106306 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.120096 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-czlvh"]
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.212446 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cpdw\" (UniqueName: \"kubernetes.io/projected/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-kube-api-access-7cpdw\") pod \"redhat-operators-czlvh\" (UID: \"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a\") " pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.212524 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-utilities\") pod \"redhat-operators-czlvh\" (UID: \"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a\") " pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.212638 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-catalog-content\") pod \"redhat-operators-czlvh\" (UID: \"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a\") " pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.314708 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-utilities\") pod \"redhat-operators-czlvh\" (UID: \"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a\") " pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.314805 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-catalog-content\") pod \"redhat-operators-czlvh\" (UID: \"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a\") " pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.314945 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cpdw\" (UniqueName: \"kubernetes.io/projected/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-kube-api-access-7cpdw\") pod \"redhat-operators-czlvh\" (UID: \"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a\") " pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.315330 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-utilities\") pod \"redhat-operators-czlvh\" (UID: \"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a\") " pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.315501 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-catalog-content\") pod \"redhat-operators-czlvh\" (UID: \"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a\") " pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.342379 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cpdw\" (UniqueName: \"kubernetes.io/projected/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-kube-api-access-7cpdw\") pod \"redhat-operators-czlvh\" (UID: \"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a\") " pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.437685 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.894380 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-czlvh"]
Oct 06 22:41:55 crc kubenswrapper[5014]: I1006 22:41:55.918376 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-czlvh" event={"ID":"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a","Type":"ContainerStarted","Data":"acc74d540e276c71bc6fedb9a3efa9ddfdea7477c193d95f9f9393fd912f464f"}
Oct 06 22:41:56 crc kubenswrapper[5014]: I1006 22:41:56.933951 5014 generic.go:334] "Generic (PLEG): container finished" podID="5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a" containerID="11c8948e47f8492962fa3128e45e3ec18a25f74997958dbe19b7f76895155873" exitCode=0
Oct 06 22:41:56 crc kubenswrapper[5014]: I1006 22:41:56.934046 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-czlvh" event={"ID":"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a","Type":"ContainerDied","Data":"11c8948e47f8492962fa3128e45e3ec18a25f74997958dbe19b7f76895155873"}
Oct 06 22:41:56 crc kubenswrapper[5014]: I1006 22:41:56.938184 5014 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 06 22:41:57 crc kubenswrapper[5014]: I1006 22:41:57.961535 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-czlvh" event={"ID":"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a","Type":"ContainerStarted","Data":"a73f469916af5c9931cf1eebce200573a0728ce22001cbe09ed34f298aa0f462"}
Oct 06 22:41:58 crc kubenswrapper[5014]: I1006 22:41:58.974277 5014 generic.go:334] "Generic (PLEG): container finished" podID="5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a" containerID="a73f469916af5c9931cf1eebce200573a0728ce22001cbe09ed34f298aa0f462" exitCode=0
Oct 06 22:41:58 crc kubenswrapper[5014]: I1006 22:41:58.974410 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-czlvh" event={"ID":"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a","Type":"ContainerDied","Data":"a73f469916af5c9931cf1eebce200573a0728ce22001cbe09ed34f298aa0f462"}
Oct 06 22:41:59 crc kubenswrapper[5014]: I1006 22:41:59.987371 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-czlvh" event={"ID":"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a","Type":"ContainerStarted","Data":"2d5b95b8f2f34f93e6de8eb031eb95d0b12d9e42b8b0985c840140be2db6487a"}
Oct 06 22:42:00 crc kubenswrapper[5014]: I1006 22:42:00.027951 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-czlvh" podStartSLOduration=2.6031312680000003 podStartE2EDuration="5.027925252s" podCreationTimestamp="2025-10-06 22:41:55 +0000 UTC" firstStartedPulling="2025-10-06 22:41:56.937767382 +0000 UTC m=+4262.230804156" lastFinishedPulling="2025-10-06 22:41:59.362561376 +0000 UTC m=+4264.655598140" observedRunningTime="2025-10-06 22:42:00.023035418 +0000 UTC m=+4265.316072192" watchObservedRunningTime="2025-10-06 22:42:00.027925252 +0000 UTC m=+4265.320962026"
Oct 06 22:42:05 crc kubenswrapper[5014]: I1006 22:42:05.438263 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:42:05 crc kubenswrapper[5014]: I1006 22:42:05.438937 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:42:06 crc kubenswrapper[5014]: I1006 22:42:06.510352 5014 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-czlvh" podUID="5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a" containerName="registry-server" probeResult="failure" output=<
Oct 06 22:42:06 crc kubenswrapper[5014]: timeout: failed to connect service ":50051" within 1s
Oct 06 22:42:06 crc kubenswrapper[5014]: >
Oct 06 22:42:15 crc kubenswrapper[5014]: I1006 22:42:15.517816 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:42:15 crc kubenswrapper[5014]: I1006 22:42:15.590134 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:42:15 crc kubenswrapper[5014]: I1006 22:42:15.766910 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-czlvh"]
Oct 06 22:42:17 crc kubenswrapper[5014]: I1006 22:42:17.160183 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-czlvh" podUID="5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a" containerName="registry-server" containerID="cri-o://2d5b95b8f2f34f93e6de8eb031eb95d0b12d9e42b8b0985c840140be2db6487a" gracePeriod=2
Oct 06 22:42:17 crc kubenswrapper[5014]: I1006 22:42:17.595751 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:42:17 crc kubenswrapper[5014]: I1006 22:42:17.774387 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-catalog-content\") pod \"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a\" (UID: \"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a\") "
Oct 06 22:42:17 crc kubenswrapper[5014]: I1006 22:42:17.774573 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cpdw\" (UniqueName: \"kubernetes.io/projected/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-kube-api-access-7cpdw\") pod \"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a\" (UID: \"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a\") "
Oct 06 22:42:17 crc kubenswrapper[5014]: I1006 22:42:17.774776 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-utilities\") pod \"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a\" (UID: \"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a\") "
Oct 06 22:42:17 crc kubenswrapper[5014]: I1006 22:42:17.775898 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-utilities" (OuterVolumeSpecName: "utilities") pod "5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a" (UID: "5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 22:42:17 crc kubenswrapper[5014]: I1006 22:42:17.783436 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-kube-api-access-7cpdw" (OuterVolumeSpecName: "kube-api-access-7cpdw") pod "5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a" (UID: "5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a"). InnerVolumeSpecName "kube-api-access-7cpdw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:42:17 crc kubenswrapper[5014]: I1006 22:42:17.876338 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cpdw\" (UniqueName: \"kubernetes.io/projected/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-kube-api-access-7cpdw\") on node \"crc\" DevicePath \"\""
Oct 06 22:42:17 crc kubenswrapper[5014]: I1006 22:42:17.876372 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 22:42:17 crc kubenswrapper[5014]: I1006 22:42:17.881533 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a" (UID: "5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 22:42:17 crc kubenswrapper[5014]: I1006 22:42:17.977548 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 22:42:18 crc kubenswrapper[5014]: I1006 22:42:18.174665 5014 generic.go:334] "Generic (PLEG): container finished" podID="5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a" containerID="2d5b95b8f2f34f93e6de8eb031eb95d0b12d9e42b8b0985c840140be2db6487a" exitCode=0
Oct 06 22:42:18 crc kubenswrapper[5014]: I1006 22:42:18.174729 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-czlvh" event={"ID":"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a","Type":"ContainerDied","Data":"2d5b95b8f2f34f93e6de8eb031eb95d0b12d9e42b8b0985c840140be2db6487a"}
Oct 06 22:42:18 crc kubenswrapper[5014]: I1006 22:42:18.174781 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-czlvh" event={"ID":"5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a","Type":"ContainerDied","Data":"acc74d540e276c71bc6fedb9a3efa9ddfdea7477c193d95f9f9393fd912f464f"}
Oct 06 22:42:18 crc kubenswrapper[5014]: I1006 22:42:18.174786 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-czlvh"
Oct 06 22:42:18 crc kubenswrapper[5014]: I1006 22:42:18.174811 5014 scope.go:117] "RemoveContainer" containerID="2d5b95b8f2f34f93e6de8eb031eb95d0b12d9e42b8b0985c840140be2db6487a"
Oct 06 22:42:18 crc kubenswrapper[5014]: I1006 22:42:18.205593 5014 scope.go:117] "RemoveContainer" containerID="a73f469916af5c9931cf1eebce200573a0728ce22001cbe09ed34f298aa0f462"
Oct 06 22:42:18 crc kubenswrapper[5014]: I1006 22:42:18.232126 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-czlvh"]
Oct 06 22:42:18 crc kubenswrapper[5014]: I1006 22:42:18.237834 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-czlvh"]
Oct 06 22:42:18 crc kubenswrapper[5014]: I1006 22:42:18.240959 5014 scope.go:117] "RemoveContainer" containerID="11c8948e47f8492962fa3128e45e3ec18a25f74997958dbe19b7f76895155873"
Oct 06 22:42:18 crc kubenswrapper[5014]: I1006 22:42:18.279416 5014 scope.go:117] "RemoveContainer" containerID="2d5b95b8f2f34f93e6de8eb031eb95d0b12d9e42b8b0985c840140be2db6487a"
Oct 06 22:42:18 crc kubenswrapper[5014]: E1006 22:42:18.280131 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d5b95b8f2f34f93e6de8eb031eb95d0b12d9e42b8b0985c840140be2db6487a\": container with ID starting with 2d5b95b8f2f34f93e6de8eb031eb95d0b12d9e42b8b0985c840140be2db6487a not found: ID does not exist" containerID="2d5b95b8f2f34f93e6de8eb031eb95d0b12d9e42b8b0985c840140be2db6487a"
Oct 06 22:42:18 crc kubenswrapper[5014]: I1006 22:42:18.280202 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d5b95b8f2f34f93e6de8eb031eb95d0b12d9e42b8b0985c840140be2db6487a"} err="failed to get container status \"2d5b95b8f2f34f93e6de8eb031eb95d0b12d9e42b8b0985c840140be2db6487a\": rpc error: code = NotFound desc = could not find container \"2d5b95b8f2f34f93e6de8eb031eb95d0b12d9e42b8b0985c840140be2db6487a\": container with ID starting with 2d5b95b8f2f34f93e6de8eb031eb95d0b12d9e42b8b0985c840140be2db6487a not found: ID does not exist"
Oct 06 22:42:18 crc kubenswrapper[5014]: I1006 22:42:18.280249 5014 scope.go:117] "RemoveContainer" containerID="a73f469916af5c9931cf1eebce200573a0728ce22001cbe09ed34f298aa0f462"
Oct 06 22:42:18 crc kubenswrapper[5014]: E1006 22:42:18.280818 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a73f469916af5c9931cf1eebce200573a0728ce22001cbe09ed34f298aa0f462\": container with ID starting with a73f469916af5c9931cf1eebce200573a0728ce22001cbe09ed34f298aa0f462 not found: ID does not exist" containerID="a73f469916af5c9931cf1eebce200573a0728ce22001cbe09ed34f298aa0f462"
Oct 06 22:42:18 crc kubenswrapper[5014]: I1006 22:42:18.280884 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a73f469916af5c9931cf1eebce200573a0728ce22001cbe09ed34f298aa0f462"} err="failed to get container status \"a73f469916af5c9931cf1eebce200573a0728ce22001cbe09ed34f298aa0f462\": rpc error: code = NotFound desc = could not find container \"a73f469916af5c9931cf1eebce200573a0728ce22001cbe09ed34f298aa0f462\": container with ID starting with a73f469916af5c9931cf1eebce200573a0728ce22001cbe09ed34f298aa0f462 not found: ID does not exist"
Oct 06 22:42:18 crc kubenswrapper[5014]: I1006 22:42:18.280935 5014 scope.go:117] "RemoveContainer" containerID="11c8948e47f8492962fa3128e45e3ec18a25f74997958dbe19b7f76895155873"
Oct 06 22:42:18 crc kubenswrapper[5014]: E1006 22:42:18.281592 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11c8948e47f8492962fa3128e45e3ec18a25f74997958dbe19b7f76895155873\": container with ID starting with 11c8948e47f8492962fa3128e45e3ec18a25f74997958dbe19b7f76895155873 not found: ID does not exist" containerID="11c8948e47f8492962fa3128e45e3ec18a25f74997958dbe19b7f76895155873"
Oct 06 22:42:18 crc kubenswrapper[5014]: I1006 22:42:18.281692 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11c8948e47f8492962fa3128e45e3ec18a25f74997958dbe19b7f76895155873"} err="failed to get container status \"11c8948e47f8492962fa3128e45e3ec18a25f74997958dbe19b7f76895155873\": rpc error: code = NotFound desc = could not find container \"11c8948e47f8492962fa3128e45e3ec18a25f74997958dbe19b7f76895155873\": container with ID starting with 11c8948e47f8492962fa3128e45e3ec18a25f74997958dbe19b7f76895155873 not found: ID does not exist"
Oct 06 22:42:19 crc kubenswrapper[5014]: I1006 22:42:19.500945 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a" path="/var/lib/kubelet/pods/5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a/volumes"
Oct 06 22:42:21 crc kubenswrapper[5014]: I1006 22:42:21.735092 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 22:42:21 crc kubenswrapper[5014]: I1006 22:42:21.735190 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 22:42:51 crc kubenswrapper[5014]: I1006 22:42:51.735363 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 22:42:51 crc kubenswrapper[5014]: I1006 22:42:51.736051 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 22:42:51 crc kubenswrapper[5014]: I1006 22:42:51.736117 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths"
Oct 06 22:42:51 crc kubenswrapper[5014]: I1006 22:42:51.737140 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e797c99f3d7250575c0fe41e7b21e8ec126de5e18dd0bbd484c427d2d4fa6e36"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 06 22:42:51 crc kubenswrapper[5014]: I1006 22:42:51.737237 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://e797c99f3d7250575c0fe41e7b21e8ec126de5e18dd0bbd484c427d2d4fa6e36" gracePeriod=600
Oct 06 22:42:52 crc kubenswrapper[5014]: I1006 22:42:52.530282 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="e797c99f3d7250575c0fe41e7b21e8ec126de5e18dd0bbd484c427d2d4fa6e36" exitCode=0
Oct 06 22:42:52 crc kubenswrapper[5014]: I1006 22:42:52.530408 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"e797c99f3d7250575c0fe41e7b21e8ec126de5e18dd0bbd484c427d2d4fa6e36"}
Oct 06 22:42:52 crc kubenswrapper[5014]: I1006 22:42:52.531171 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae"}
Oct 06 22:42:52 crc kubenswrapper[5014]: I1006 22:42:52.531251 5014 scope.go:117] "RemoveContainer" containerID="aec4228c1ce8ab4d874ed8abc59eb3115aa0dcaac700a3bac856f726105e7416"
Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.509372 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ltg9p"]
Oct 06 22:43:44 crc kubenswrapper[5014]: E1006 22:43:44.511405 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a" containerName="registry-server"
Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.511557 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a" containerName="registry-server"
Oct 06 22:43:44 crc kubenswrapper[5014]: E1006 22:43:44.511687 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a"
containerName="extract-content" Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.511773 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a" containerName="extract-content" Oct 06 22:43:44 crc kubenswrapper[5014]: E1006 22:43:44.511870 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a" containerName="extract-utilities" Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.511944 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a" containerName="extract-utilities" Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.512206 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a17ce93-fd6a-4cdc-941c-3483dd1a5b0a" containerName="registry-server" Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.513553 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.520356 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ltg9p"] Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.651107 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6hcm\" (UniqueName: \"kubernetes.io/projected/09352ab1-154c-4119-b60b-cedbec798e24-kube-api-access-k6hcm\") pod \"redhat-marketplace-ltg9p\" (UID: \"09352ab1-154c-4119-b60b-cedbec798e24\") " pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.651490 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09352ab1-154c-4119-b60b-cedbec798e24-catalog-content\") pod \"redhat-marketplace-ltg9p\" (UID: \"09352ab1-154c-4119-b60b-cedbec798e24\") " pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.651537 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09352ab1-154c-4119-b60b-cedbec798e24-utilities\") pod \"redhat-marketplace-ltg9p\" (UID: \"09352ab1-154c-4119-b60b-cedbec798e24\") " pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.753396 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6hcm\" (UniqueName: \"kubernetes.io/projected/09352ab1-154c-4119-b60b-cedbec798e24-kube-api-access-k6hcm\") pod \"redhat-marketplace-ltg9p\" (UID: \"09352ab1-154c-4119-b60b-cedbec798e24\") " pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.753506 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09352ab1-154c-4119-b60b-cedbec798e24-catalog-content\") pod \"redhat-marketplace-ltg9p\" (UID: \"09352ab1-154c-4119-b60b-cedbec798e24\") " pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.753552 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09352ab1-154c-4119-b60b-cedbec798e24-utilities\") pod \"redhat-marketplace-ltg9p\" (UID: 
\"09352ab1-154c-4119-b60b-cedbec798e24\") " pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.754232 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09352ab1-154c-4119-b60b-cedbec798e24-utilities\") pod \"redhat-marketplace-ltg9p\" (UID: \"09352ab1-154c-4119-b60b-cedbec798e24\") " pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.754612 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09352ab1-154c-4119-b60b-cedbec798e24-catalog-content\") pod \"redhat-marketplace-ltg9p\" (UID: \"09352ab1-154c-4119-b60b-cedbec798e24\") " pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.793997 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6hcm\" (UniqueName: \"kubernetes.io/projected/09352ab1-154c-4119-b60b-cedbec798e24-kube-api-access-k6hcm\") pod \"redhat-marketplace-ltg9p\" (UID: \"09352ab1-154c-4119-b60b-cedbec798e24\") " pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:44 crc kubenswrapper[5014]: I1006 22:43:44.857493 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:45 crc kubenswrapper[5014]: I1006 22:43:45.327458 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ltg9p"] Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.049980 5014 generic.go:334] "Generic (PLEG): container finished" podID="09352ab1-154c-4119-b60b-cedbec798e24" containerID="4a8c08e5d062ca8982d05a5db501e05bd9ecbe0c914834d7d5c190ff7d05df0c" exitCode=0 Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.050041 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ltg9p" event={"ID":"09352ab1-154c-4119-b60b-cedbec798e24","Type":"ContainerDied","Data":"4a8c08e5d062ca8982d05a5db501e05bd9ecbe0c914834d7d5c190ff7d05df0c"} Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.050122 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ltg9p" event={"ID":"09352ab1-154c-4119-b60b-cedbec798e24","Type":"ContainerStarted","Data":"5397161bdb2449f025525d4f43a5661b394df406c2db3fdebf97dcc28760f5de"} Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.310482 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jmnx7"] Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.314171 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.331659 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jmnx7"] Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.481341 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2wrb\" (UniqueName: \"kubernetes.io/projected/b5612ea0-3c83-456e-b578-59f288fdf1b7-kube-api-access-p2wrb\") pod \"certified-operators-jmnx7\" (UID: \"b5612ea0-3c83-456e-b578-59f288fdf1b7\") " pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.481461 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5612ea0-3c83-456e-b578-59f288fdf1b7-catalog-content\") pod \"certified-operators-jmnx7\" (UID: \"b5612ea0-3c83-456e-b578-59f288fdf1b7\") " pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.481497 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5612ea0-3c83-456e-b578-59f288fdf1b7-utilities\") pod \"certified-operators-jmnx7\" (UID: \"b5612ea0-3c83-456e-b578-59f288fdf1b7\") " pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.582833 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2wrb\" (UniqueName: \"kubernetes.io/projected/b5612ea0-3c83-456e-b578-59f288fdf1b7-kube-api-access-p2wrb\") pod \"certified-operators-jmnx7\" (UID: \"b5612ea0-3c83-456e-b578-59f288fdf1b7\") " pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.583058 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5612ea0-3c83-456e-b578-59f288fdf1b7-catalog-content\") pod \"certified-operators-jmnx7\" (UID: \"b5612ea0-3c83-456e-b578-59f288fdf1b7\") " pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.583189 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5612ea0-3c83-456e-b578-59f288fdf1b7-utilities\") pod \"certified-operators-jmnx7\" (UID: \"b5612ea0-3c83-456e-b578-59f288fdf1b7\") " pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.583740 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5612ea0-3c83-456e-b578-59f288fdf1b7-catalog-content\") pod \"certified-operators-jmnx7\" (UID: \"b5612ea0-3c83-456e-b578-59f288fdf1b7\") " pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.583929 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5612ea0-3c83-456e-b578-59f288fdf1b7-utilities\") pod \"certified-operators-jmnx7\" (UID: \"b5612ea0-3c83-456e-b578-59f288fdf1b7\") " pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.607004 5014 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-p2wrb\" (UniqueName: \"kubernetes.io/projected/b5612ea0-3c83-456e-b578-59f288fdf1b7-kube-api-access-p2wrb\") pod \"certified-operators-jmnx7\" (UID: \"b5612ea0-3c83-456e-b578-59f288fdf1b7\") " pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.680878 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.903313 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-m86f8"] Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.905322 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.924004 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-m86f8"] Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.993715 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16c3f436-465f-4853-a45e-c4c42d880bfe-catalog-content\") pod \"community-operators-m86f8\" (UID: \"16c3f436-465f-4853-a45e-c4c42d880bfe\") " pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.993753 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zt26\" (UniqueName: \"kubernetes.io/projected/16c3f436-465f-4853-a45e-c4c42d880bfe-kube-api-access-5zt26\") pod \"community-operators-m86f8\" (UID: \"16c3f436-465f-4853-a45e-c4c42d880bfe\") " pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:43:46 crc kubenswrapper[5014]: I1006 22:43:46.993832 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16c3f436-465f-4853-a45e-c4c42d880bfe-utilities\") pod \"community-operators-m86f8\" (UID: \"16c3f436-465f-4853-a45e-c4c42d880bfe\") " pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:43:47 crc kubenswrapper[5014]: I1006 22:43:47.096238 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16c3f436-465f-4853-a45e-c4c42d880bfe-catalog-content\") pod \"community-operators-m86f8\" (UID: \"16c3f436-465f-4853-a45e-c4c42d880bfe\") " pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:43:47 crc kubenswrapper[5014]: I1006 22:43:47.096302 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zt26\" (UniqueName: \"kubernetes.io/projected/16c3f436-465f-4853-a45e-c4c42d880bfe-kube-api-access-5zt26\") pod \"community-operators-m86f8\" (UID: \"16c3f436-465f-4853-a45e-c4c42d880bfe\") " pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:43:47 crc kubenswrapper[5014]: I1006 22:43:47.096489 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16c3f436-465f-4853-a45e-c4c42d880bfe-utilities\") pod \"community-operators-m86f8\" (UID: \"16c3f436-465f-4853-a45e-c4c42d880bfe\") " pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:43:47 crc kubenswrapper[5014]: I1006 22:43:47.097032 5014 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16c3f436-465f-4853-a45e-c4c42d880bfe-catalog-content\") pod \"community-operators-m86f8\" (UID: \"16c3f436-465f-4853-a45e-c4c42d880bfe\") " pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:43:47 crc kubenswrapper[5014]: I1006 22:43:47.097179 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16c3f436-465f-4853-a45e-c4c42d880bfe-utilities\") pod \"community-operators-m86f8\" (UID: \"16c3f436-465f-4853-a45e-c4c42d880bfe\") " pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:43:47 crc kubenswrapper[5014]: I1006 22:43:47.122684 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zt26\" (UniqueName: \"kubernetes.io/projected/16c3f436-465f-4853-a45e-c4c42d880bfe-kube-api-access-5zt26\") pod \"community-operators-m86f8\" (UID: \"16c3f436-465f-4853-a45e-c4c42d880bfe\") " pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:43:47 crc kubenswrapper[5014]: I1006 22:43:47.188938 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jmnx7"] Oct 06 22:43:47 crc kubenswrapper[5014]: I1006 22:43:47.249112 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:43:47 crc kubenswrapper[5014]: I1006 22:43:47.731263 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-m86f8"] Oct 06 22:43:47 crc kubenswrapper[5014]: W1006 22:43:47.735358 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod16c3f436_465f_4853_a45e_c4c42d880bfe.slice/crio-a8af4086bc2f8408791b70813da357bb6b4c5770430f634b049cd285ab208340 WatchSource:0}: Error finding container a8af4086bc2f8408791b70813da357bb6b4c5770430f634b049cd285ab208340: Status 404 returned error can't find the container with id a8af4086bc2f8408791b70813da357bb6b4c5770430f634b049cd285ab208340 Oct 06 22:43:48 crc kubenswrapper[5014]: I1006 22:43:48.067797 5014 generic.go:334] "Generic (PLEG): container finished" podID="16c3f436-465f-4853-a45e-c4c42d880bfe" containerID="16b4b5ff668a9b05717afc2668f9b3c82a0e148b526643994fccfd5e51240fcb" exitCode=0 Oct 06 22:43:48 crc kubenswrapper[5014]: I1006 22:43:48.067960 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m86f8" event={"ID":"16c3f436-465f-4853-a45e-c4c42d880bfe","Type":"ContainerDied","Data":"16b4b5ff668a9b05717afc2668f9b3c82a0e148b526643994fccfd5e51240fcb"} Oct 06 22:43:48 crc kubenswrapper[5014]: I1006 22:43:48.068018 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m86f8" event={"ID":"16c3f436-465f-4853-a45e-c4c42d880bfe","Type":"ContainerStarted","Data":"a8af4086bc2f8408791b70813da357bb6b4c5770430f634b049cd285ab208340"} Oct 06 22:43:48 crc kubenswrapper[5014]: I1006 22:43:48.075026 5014 generic.go:334] "Generic (PLEG): container finished" podID="09352ab1-154c-4119-b60b-cedbec798e24" containerID="02fc54f063864c68706c5d4d4eaa4c6fc397684cddaffb904b08fb3f3cf5a388" exitCode=0 Oct 06 22:43:48 crc kubenswrapper[5014]: I1006 22:43:48.075105 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ltg9p" 
event={"ID":"09352ab1-154c-4119-b60b-cedbec798e24","Type":"ContainerDied","Data":"02fc54f063864c68706c5d4d4eaa4c6fc397684cddaffb904b08fb3f3cf5a388"} Oct 06 22:43:48 crc kubenswrapper[5014]: I1006 22:43:48.079200 5014 generic.go:334] "Generic (PLEG): container finished" podID="b5612ea0-3c83-456e-b578-59f288fdf1b7" containerID="a524647b45f746745d5f7667d15ca187e40a5d56e355ab8c635e1c59a0c057e6" exitCode=0 Oct 06 22:43:48 crc kubenswrapper[5014]: I1006 22:43:48.079232 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jmnx7" event={"ID":"b5612ea0-3c83-456e-b578-59f288fdf1b7","Type":"ContainerDied","Data":"a524647b45f746745d5f7667d15ca187e40a5d56e355ab8c635e1c59a0c057e6"} Oct 06 22:43:48 crc kubenswrapper[5014]: I1006 22:43:48.079257 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jmnx7" event={"ID":"b5612ea0-3c83-456e-b578-59f288fdf1b7","Type":"ContainerStarted","Data":"9de68e5f61463b0539d0a47ee387439d139408f353fcf8dcacc8a80f99ed1088"} Oct 06 22:43:49 crc kubenswrapper[5014]: I1006 22:43:49.098049 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ltg9p" event={"ID":"09352ab1-154c-4119-b60b-cedbec798e24","Type":"ContainerStarted","Data":"28ac78ca32d7288259303d1195933f405ac831e33eecedfa3dfc543efe1d661b"} Oct 06 22:43:49 crc kubenswrapper[5014]: I1006 22:43:49.100642 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jmnx7" event={"ID":"b5612ea0-3c83-456e-b578-59f288fdf1b7","Type":"ContainerStarted","Data":"a34d01a0e0cd39739acc0542f649882b2216b6df2f6101331d8573eb39c82ae4"} Oct 06 22:43:49 crc kubenswrapper[5014]: I1006 22:43:49.104461 5014 generic.go:334] "Generic (PLEG): container finished" podID="16c3f436-465f-4853-a45e-c4c42d880bfe" containerID="dc33770cd6fd248143d949231a0f237a51790d698a8410d87e73b812f2a7c382" exitCode=0 Oct 06 22:43:49 crc kubenswrapper[5014]: I1006 22:43:49.104511 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m86f8" event={"ID":"16c3f436-465f-4853-a45e-c4c42d880bfe","Type":"ContainerDied","Data":"dc33770cd6fd248143d949231a0f237a51790d698a8410d87e73b812f2a7c382"} Oct 06 22:43:49 crc kubenswrapper[5014]: I1006 22:43:49.129241 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ltg9p" podStartSLOduration=2.419577904 podStartE2EDuration="5.129221673s" podCreationTimestamp="2025-10-06 22:43:44 +0000 UTC" firstStartedPulling="2025-10-06 22:43:46.054269551 +0000 UTC m=+4371.347306325" lastFinishedPulling="2025-10-06 22:43:48.76391336 +0000 UTC m=+4374.056950094" observedRunningTime="2025-10-06 22:43:49.124361081 +0000 UTC m=+4374.417397815" watchObservedRunningTime="2025-10-06 22:43:49.129221673 +0000 UTC m=+4374.422258417" Oct 06 22:43:50 crc kubenswrapper[5014]: I1006 22:43:50.116850 5014 generic.go:334] "Generic (PLEG): container finished" podID="b5612ea0-3c83-456e-b578-59f288fdf1b7" containerID="a34d01a0e0cd39739acc0542f649882b2216b6df2f6101331d8573eb39c82ae4" exitCode=0 Oct 06 22:43:50 crc kubenswrapper[5014]: I1006 22:43:50.116953 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jmnx7" event={"ID":"b5612ea0-3c83-456e-b578-59f288fdf1b7","Type":"ContainerDied","Data":"a34d01a0e0cd39739acc0542f649882b2216b6df2f6101331d8573eb39c82ae4"} Oct 06 22:43:50 crc kubenswrapper[5014]: I1006 22:43:50.120598 5014 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m86f8" event={"ID":"16c3f436-465f-4853-a45e-c4c42d880bfe","Type":"ContainerStarted","Data":"180e533a1d2765a56b7a3045baba6bb58ebdc2fc0255c2b59590f6dcceff8f00"} Oct 06 22:43:50 crc kubenswrapper[5014]: I1006 22:43:50.183289 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-m86f8" podStartSLOduration=2.6100330339999998 podStartE2EDuration="4.183255063s" podCreationTimestamp="2025-10-06 22:43:46 +0000 UTC" firstStartedPulling="2025-10-06 22:43:48.070008782 +0000 UTC m=+4373.363045516" lastFinishedPulling="2025-10-06 22:43:49.643230781 +0000 UTC m=+4374.936267545" observedRunningTime="2025-10-06 22:43:50.172042743 +0000 UTC m=+4375.465079527" watchObservedRunningTime="2025-10-06 22:43:50.183255063 +0000 UTC m=+4375.476291837" Oct 06 22:43:51 crc kubenswrapper[5014]: I1006 22:43:51.133298 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jmnx7" event={"ID":"b5612ea0-3c83-456e-b578-59f288fdf1b7","Type":"ContainerStarted","Data":"9b070d928cbb66182ec0dad771f32b6c4ac9c787513efafa8d9086a75c9d1ced"} Oct 06 22:43:51 crc kubenswrapper[5014]: I1006 22:43:51.156317 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jmnx7" podStartSLOduration=2.735586623 podStartE2EDuration="5.156299369s" podCreationTimestamp="2025-10-06 22:43:46 +0000 UTC" firstStartedPulling="2025-10-06 22:43:48.082305086 +0000 UTC m=+4373.375341850" lastFinishedPulling="2025-10-06 22:43:50.503017822 +0000 UTC m=+4375.796054596" observedRunningTime="2025-10-06 22:43:51.151313302 +0000 UTC m=+4376.444350046" watchObservedRunningTime="2025-10-06 22:43:51.156299369 +0000 UTC m=+4376.449336113" Oct 06 22:43:54 crc kubenswrapper[5014]: I1006 22:43:54.857717 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:54 crc kubenswrapper[5014]: I1006 22:43:54.858127 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:54 crc kubenswrapper[5014]: I1006 22:43:54.935319 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:55 crc kubenswrapper[5014]: I1006 22:43:55.249094 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:56 crc kubenswrapper[5014]: I1006 22:43:56.299386 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ltg9p"] Oct 06 22:43:56 crc kubenswrapper[5014]: I1006 22:43:56.682430 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:43:56 crc kubenswrapper[5014]: I1006 22:43:56.683740 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:43:56 crc kubenswrapper[5014]: I1006 22:43:56.777409 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:43:57 crc kubenswrapper[5014]: I1006 22:43:57.199110 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ltg9p" 
podUID="09352ab1-154c-4119-b60b-cedbec798e24" containerName="registry-server" containerID="cri-o://28ac78ca32d7288259303d1195933f405ac831e33eecedfa3dfc543efe1d661b" gracePeriod=2 Oct 06 22:43:57 crc kubenswrapper[5014]: I1006 22:43:57.249667 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:43:57 crc kubenswrapper[5014]: I1006 22:43:57.249758 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:43:57 crc kubenswrapper[5014]: I1006 22:43:57.269012 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:43:57 crc kubenswrapper[5014]: I1006 22:43:57.331499 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:43:57 crc kubenswrapper[5014]: I1006 22:43:57.859541 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:57 crc kubenswrapper[5014]: I1006 22:43:57.985851 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6hcm\" (UniqueName: \"kubernetes.io/projected/09352ab1-154c-4119-b60b-cedbec798e24-kube-api-access-k6hcm\") pod \"09352ab1-154c-4119-b60b-cedbec798e24\" (UID: \"09352ab1-154c-4119-b60b-cedbec798e24\") " Oct 06 22:43:57 crc kubenswrapper[5014]: I1006 22:43:57.985927 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09352ab1-154c-4119-b60b-cedbec798e24-utilities\") pod \"09352ab1-154c-4119-b60b-cedbec798e24\" (UID: \"09352ab1-154c-4119-b60b-cedbec798e24\") " Oct 06 22:43:57 crc kubenswrapper[5014]: I1006 22:43:57.986024 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09352ab1-154c-4119-b60b-cedbec798e24-catalog-content\") pod \"09352ab1-154c-4119-b60b-cedbec798e24\" (UID: \"09352ab1-154c-4119-b60b-cedbec798e24\") " Oct 06 22:43:57 crc kubenswrapper[5014]: I1006 22:43:57.987035 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09352ab1-154c-4119-b60b-cedbec798e24-utilities" (OuterVolumeSpecName: "utilities") pod "09352ab1-154c-4119-b60b-cedbec798e24" (UID: "09352ab1-154c-4119-b60b-cedbec798e24"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:43:57 crc kubenswrapper[5014]: I1006 22:43:57.994443 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09352ab1-154c-4119-b60b-cedbec798e24-kube-api-access-k6hcm" (OuterVolumeSpecName: "kube-api-access-k6hcm") pod "09352ab1-154c-4119-b60b-cedbec798e24" (UID: "09352ab1-154c-4119-b60b-cedbec798e24"). InnerVolumeSpecName "kube-api-access-k6hcm". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.002853 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09352ab1-154c-4119-b60b-cedbec798e24-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "09352ab1-154c-4119-b60b-cedbec798e24" (UID: "09352ab1-154c-4119-b60b-cedbec798e24"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.087537 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09352ab1-154c-4119-b60b-cedbec798e24-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.087583 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6hcm\" (UniqueName: \"kubernetes.io/projected/09352ab1-154c-4119-b60b-cedbec798e24-kube-api-access-k6hcm\") on node \"crc\" DevicePath \"\"" Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.087598 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09352ab1-154c-4119-b60b-cedbec798e24-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.206462 5014 generic.go:334] "Generic (PLEG): container finished" podID="09352ab1-154c-4119-b60b-cedbec798e24" containerID="28ac78ca32d7288259303d1195933f405ac831e33eecedfa3dfc543efe1d661b" exitCode=0 Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.207381 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ltg9p" Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.208771 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ltg9p" event={"ID":"09352ab1-154c-4119-b60b-cedbec798e24","Type":"ContainerDied","Data":"28ac78ca32d7288259303d1195933f405ac831e33eecedfa3dfc543efe1d661b"} Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.208819 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ltg9p" event={"ID":"09352ab1-154c-4119-b60b-cedbec798e24","Type":"ContainerDied","Data":"5397161bdb2449f025525d4f43a5661b394df406c2db3fdebf97dcc28760f5de"} Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.208841 5014 scope.go:117] "RemoveContainer" containerID="28ac78ca32d7288259303d1195933f405ac831e33eecedfa3dfc543efe1d661b" Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.240106 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ltg9p"] Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.246657 5014 scope.go:117] "RemoveContainer" containerID="02fc54f063864c68706c5d4d4eaa4c6fc397684cddaffb904b08fb3f3cf5a388" Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.250909 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ltg9p"] Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.267690 5014 scope.go:117] "RemoveContainer" containerID="4a8c08e5d062ca8982d05a5db501e05bd9ecbe0c914834d7d5c190ff7d05df0c" Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.274603 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.322861 5014 scope.go:117] "RemoveContainer" containerID="28ac78ca32d7288259303d1195933f405ac831e33eecedfa3dfc543efe1d661b" Oct 06 22:43:58 crc kubenswrapper[5014]: E1006 22:43:58.326133 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28ac78ca32d7288259303d1195933f405ac831e33eecedfa3dfc543efe1d661b\": container with ID starting with 
28ac78ca32d7288259303d1195933f405ac831e33eecedfa3dfc543efe1d661b not found: ID does not exist" containerID="28ac78ca32d7288259303d1195933f405ac831e33eecedfa3dfc543efe1d661b" Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.326177 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28ac78ca32d7288259303d1195933f405ac831e33eecedfa3dfc543efe1d661b"} err="failed to get container status \"28ac78ca32d7288259303d1195933f405ac831e33eecedfa3dfc543efe1d661b\": rpc error: code = NotFound desc = could not find container \"28ac78ca32d7288259303d1195933f405ac831e33eecedfa3dfc543efe1d661b\": container with ID starting with 28ac78ca32d7288259303d1195933f405ac831e33eecedfa3dfc543efe1d661b not found: ID does not exist" Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.326211 5014 scope.go:117] "RemoveContainer" containerID="02fc54f063864c68706c5d4d4eaa4c6fc397684cddaffb904b08fb3f3cf5a388" Oct 06 22:43:58 crc kubenswrapper[5014]: E1006 22:43:58.327084 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02fc54f063864c68706c5d4d4eaa4c6fc397684cddaffb904b08fb3f3cf5a388\": container with ID starting with 02fc54f063864c68706c5d4d4eaa4c6fc397684cddaffb904b08fb3f3cf5a388 not found: ID does not exist" containerID="02fc54f063864c68706c5d4d4eaa4c6fc397684cddaffb904b08fb3f3cf5a388" Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.327118 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02fc54f063864c68706c5d4d4eaa4c6fc397684cddaffb904b08fb3f3cf5a388"} err="failed to get container status \"02fc54f063864c68706c5d4d4eaa4c6fc397684cddaffb904b08fb3f3cf5a388\": rpc error: code = NotFound desc = could not find container \"02fc54f063864c68706c5d4d4eaa4c6fc397684cddaffb904b08fb3f3cf5a388\": container with ID starting with 02fc54f063864c68706c5d4d4eaa4c6fc397684cddaffb904b08fb3f3cf5a388 not found: ID does not exist" Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.327146 5014 scope.go:117] "RemoveContainer" containerID="4a8c08e5d062ca8982d05a5db501e05bd9ecbe0c914834d7d5c190ff7d05df0c" Oct 06 22:43:58 crc kubenswrapper[5014]: E1006 22:43:58.327386 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a8c08e5d062ca8982d05a5db501e05bd9ecbe0c914834d7d5c190ff7d05df0c\": container with ID starting with 4a8c08e5d062ca8982d05a5db501e05bd9ecbe0c914834d7d5c190ff7d05df0c not found: ID does not exist" containerID="4a8c08e5d062ca8982d05a5db501e05bd9ecbe0c914834d7d5c190ff7d05df0c" Oct 06 22:43:58 crc kubenswrapper[5014]: I1006 22:43:58.327408 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a8c08e5d062ca8982d05a5db501e05bd9ecbe0c914834d7d5c190ff7d05df0c"} err="failed to get container status \"4a8c08e5d062ca8982d05a5db501e05bd9ecbe0c914834d7d5c190ff7d05df0c\": rpc error: code = NotFound desc = could not find container \"4a8c08e5d062ca8982d05a5db501e05bd9ecbe0c914834d7d5c190ff7d05df0c\": container with ID starting with 4a8c08e5d062ca8982d05a5db501e05bd9ecbe0c914834d7d5c190ff7d05df0c not found: ID does not exist" Oct 06 22:43:59 crc kubenswrapper[5014]: I1006 22:43:59.098766 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jmnx7"] Oct 06 22:43:59 crc kubenswrapper[5014]: I1006 22:43:59.503950 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="09352ab1-154c-4119-b60b-cedbec798e24" path="/var/lib/kubelet/pods/09352ab1-154c-4119-b60b-cedbec798e24/volumes" Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.233541 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jmnx7" podUID="b5612ea0-3c83-456e-b578-59f288fdf1b7" containerName="registry-server" containerID="cri-o://9b070d928cbb66182ec0dad771f32b6c4ac9c787513efafa8d9086a75c9d1ced" gracePeriod=2 Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.495159 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-m86f8"] Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.495488 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-m86f8" podUID="16c3f436-465f-4853-a45e-c4c42d880bfe" containerName="registry-server" containerID="cri-o://180e533a1d2765a56b7a3045baba6bb58ebdc2fc0255c2b59590f6dcceff8f00" gracePeriod=2 Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.774399 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.833229 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5612ea0-3c83-456e-b578-59f288fdf1b7-catalog-content\") pod \"b5612ea0-3c83-456e-b578-59f288fdf1b7\" (UID: \"b5612ea0-3c83-456e-b578-59f288fdf1b7\") " Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.833281 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5612ea0-3c83-456e-b578-59f288fdf1b7-utilities\") pod \"b5612ea0-3c83-456e-b578-59f288fdf1b7\" (UID: \"b5612ea0-3c83-456e-b578-59f288fdf1b7\") " Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.833335 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2wrb\" (UniqueName: \"kubernetes.io/projected/b5612ea0-3c83-456e-b578-59f288fdf1b7-kube-api-access-p2wrb\") pod \"b5612ea0-3c83-456e-b578-59f288fdf1b7\" (UID: \"b5612ea0-3c83-456e-b578-59f288fdf1b7\") " Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.840821 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5612ea0-3c83-456e-b578-59f288fdf1b7-kube-api-access-p2wrb" (OuterVolumeSpecName: "kube-api-access-p2wrb") pod "b5612ea0-3c83-456e-b578-59f288fdf1b7" (UID: "b5612ea0-3c83-456e-b578-59f288fdf1b7"). InnerVolumeSpecName "kube-api-access-p2wrb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.841787 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5612ea0-3c83-456e-b578-59f288fdf1b7-utilities" (OuterVolumeSpecName: "utilities") pod "b5612ea0-3c83-456e-b578-59f288fdf1b7" (UID: "b5612ea0-3c83-456e-b578-59f288fdf1b7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.877247 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5612ea0-3c83-456e-b578-59f288fdf1b7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b5612ea0-3c83-456e-b578-59f288fdf1b7" (UID: "b5612ea0-3c83-456e-b578-59f288fdf1b7"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.928195 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.934405 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zt26\" (UniqueName: \"kubernetes.io/projected/16c3f436-465f-4853-a45e-c4c42d880bfe-kube-api-access-5zt26\") pod \"16c3f436-465f-4853-a45e-c4c42d880bfe\" (UID: \"16c3f436-465f-4853-a45e-c4c42d880bfe\") " Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.934445 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16c3f436-465f-4853-a45e-c4c42d880bfe-utilities\") pod \"16c3f436-465f-4853-a45e-c4c42d880bfe\" (UID: \"16c3f436-465f-4853-a45e-c4c42d880bfe\") " Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.934483 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16c3f436-465f-4853-a45e-c4c42d880bfe-catalog-content\") pod \"16c3f436-465f-4853-a45e-c4c42d880bfe\" (UID: \"16c3f436-465f-4853-a45e-c4c42d880bfe\") " Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.934794 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5612ea0-3c83-456e-b578-59f288fdf1b7-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.934811 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5612ea0-3c83-456e-b578-59f288fdf1b7-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.934823 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2wrb\" (UniqueName: \"kubernetes.io/projected/b5612ea0-3c83-456e-b578-59f288fdf1b7-kube-api-access-p2wrb\") on node \"crc\" DevicePath \"\"" Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.935588 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16c3f436-465f-4853-a45e-c4c42d880bfe-utilities" (OuterVolumeSpecName: "utilities") pod "16c3f436-465f-4853-a45e-c4c42d880bfe" (UID: "16c3f436-465f-4853-a45e-c4c42d880bfe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.937579 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16c3f436-465f-4853-a45e-c4c42d880bfe-kube-api-access-5zt26" (OuterVolumeSpecName: "kube-api-access-5zt26") pod "16c3f436-465f-4853-a45e-c4c42d880bfe" (UID: "16c3f436-465f-4853-a45e-c4c42d880bfe"). InnerVolumeSpecName "kube-api-access-5zt26". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:44:00 crc kubenswrapper[5014]: I1006 22:44:00.992948 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16c3f436-465f-4853-a45e-c4c42d880bfe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "16c3f436-465f-4853-a45e-c4c42d880bfe" (UID: "16c3f436-465f-4853-a45e-c4c42d880bfe"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.035752 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16c3f436-465f-4853-a45e-c4c42d880bfe-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.035796 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zt26\" (UniqueName: \"kubernetes.io/projected/16c3f436-465f-4853-a45e-c4c42d880bfe-kube-api-access-5zt26\") on node \"crc\" DevicePath \"\"" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.035809 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16c3f436-465f-4853-a45e-c4c42d880bfe-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.249286 5014 generic.go:334] "Generic (PLEG): container finished" podID="b5612ea0-3c83-456e-b578-59f288fdf1b7" containerID="9b070d928cbb66182ec0dad771f32b6c4ac9c787513efafa8d9086a75c9d1ced" exitCode=0 Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.249412 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jmnx7" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.249467 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jmnx7" event={"ID":"b5612ea0-3c83-456e-b578-59f288fdf1b7","Type":"ContainerDied","Data":"9b070d928cbb66182ec0dad771f32b6c4ac9c787513efafa8d9086a75c9d1ced"} Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.250686 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jmnx7" event={"ID":"b5612ea0-3c83-456e-b578-59f288fdf1b7","Type":"ContainerDied","Data":"9de68e5f61463b0539d0a47ee387439d139408f353fcf8dcacc8a80f99ed1088"} Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.250724 5014 scope.go:117] "RemoveContainer" containerID="9b070d928cbb66182ec0dad771f32b6c4ac9c787513efafa8d9086a75c9d1ced" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.256312 5014 generic.go:334] "Generic (PLEG): container finished" podID="16c3f436-465f-4853-a45e-c4c42d880bfe" containerID="180e533a1d2765a56b7a3045baba6bb58ebdc2fc0255c2b59590f6dcceff8f00" exitCode=0 Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.256386 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m86f8" event={"ID":"16c3f436-465f-4853-a45e-c4c42d880bfe","Type":"ContainerDied","Data":"180e533a1d2765a56b7a3045baba6bb58ebdc2fc0255c2b59590f6dcceff8f00"} Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.256419 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-m86f8" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.256439 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m86f8" event={"ID":"16c3f436-465f-4853-a45e-c4c42d880bfe","Type":"ContainerDied","Data":"a8af4086bc2f8408791b70813da357bb6b4c5770430f634b049cd285ab208340"} Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.288201 5014 scope.go:117] "RemoveContainer" containerID="a34d01a0e0cd39739acc0542f649882b2216b6df2f6101331d8573eb39c82ae4" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.296411 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-m86f8"] Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.313079 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-m86f8"] Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.320035 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jmnx7"] Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.325937 5014 scope.go:117] "RemoveContainer" containerID="a524647b45f746745d5f7667d15ca187e40a5d56e355ab8c635e1c59a0c057e6" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.327493 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jmnx7"] Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.349012 5014 scope.go:117] "RemoveContainer" containerID="9b070d928cbb66182ec0dad771f32b6c4ac9c787513efafa8d9086a75c9d1ced" Oct 06 22:44:01 crc kubenswrapper[5014]: E1006 22:44:01.349484 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b070d928cbb66182ec0dad771f32b6c4ac9c787513efafa8d9086a75c9d1ced\": container with ID starting with 9b070d928cbb66182ec0dad771f32b6c4ac9c787513efafa8d9086a75c9d1ced not found: ID does not exist" containerID="9b070d928cbb66182ec0dad771f32b6c4ac9c787513efafa8d9086a75c9d1ced" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.349528 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b070d928cbb66182ec0dad771f32b6c4ac9c787513efafa8d9086a75c9d1ced"} err="failed to get container status \"9b070d928cbb66182ec0dad771f32b6c4ac9c787513efafa8d9086a75c9d1ced\": rpc error: code = NotFound desc = could not find container \"9b070d928cbb66182ec0dad771f32b6c4ac9c787513efafa8d9086a75c9d1ced\": container with ID starting with 9b070d928cbb66182ec0dad771f32b6c4ac9c787513efafa8d9086a75c9d1ced not found: ID does not exist" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.349559 5014 scope.go:117] "RemoveContainer" containerID="a34d01a0e0cd39739acc0542f649882b2216b6df2f6101331d8573eb39c82ae4" Oct 06 22:44:01 crc kubenswrapper[5014]: E1006 22:44:01.349844 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a34d01a0e0cd39739acc0542f649882b2216b6df2f6101331d8573eb39c82ae4\": container with ID starting with a34d01a0e0cd39739acc0542f649882b2216b6df2f6101331d8573eb39c82ae4 not found: ID does not exist" containerID="a34d01a0e0cd39739acc0542f649882b2216b6df2f6101331d8573eb39c82ae4" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.349873 5014 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"a34d01a0e0cd39739acc0542f649882b2216b6df2f6101331d8573eb39c82ae4"} err="failed to get container status \"a34d01a0e0cd39739acc0542f649882b2216b6df2f6101331d8573eb39c82ae4\": rpc error: code = NotFound desc = could not find container \"a34d01a0e0cd39739acc0542f649882b2216b6df2f6101331d8573eb39c82ae4\": container with ID starting with a34d01a0e0cd39739acc0542f649882b2216b6df2f6101331d8573eb39c82ae4 not found: ID does not exist" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.349896 5014 scope.go:117] "RemoveContainer" containerID="a524647b45f746745d5f7667d15ca187e40a5d56e355ab8c635e1c59a0c057e6" Oct 06 22:44:01 crc kubenswrapper[5014]: E1006 22:44:01.350106 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a524647b45f746745d5f7667d15ca187e40a5d56e355ab8c635e1c59a0c057e6\": container with ID starting with a524647b45f746745d5f7667d15ca187e40a5d56e355ab8c635e1c59a0c057e6 not found: ID does not exist" containerID="a524647b45f746745d5f7667d15ca187e40a5d56e355ab8c635e1c59a0c057e6" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.350136 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a524647b45f746745d5f7667d15ca187e40a5d56e355ab8c635e1c59a0c057e6"} err="failed to get container status \"a524647b45f746745d5f7667d15ca187e40a5d56e355ab8c635e1c59a0c057e6\": rpc error: code = NotFound desc = could not find container \"a524647b45f746745d5f7667d15ca187e40a5d56e355ab8c635e1c59a0c057e6\": container with ID starting with a524647b45f746745d5f7667d15ca187e40a5d56e355ab8c635e1c59a0c057e6 not found: ID does not exist" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.350157 5014 scope.go:117] "RemoveContainer" containerID="180e533a1d2765a56b7a3045baba6bb58ebdc2fc0255c2b59590f6dcceff8f00" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.404548 5014 scope.go:117] "RemoveContainer" containerID="dc33770cd6fd248143d949231a0f237a51790d698a8410d87e73b812f2a7c382" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.424531 5014 scope.go:117] "RemoveContainer" containerID="16b4b5ff668a9b05717afc2668f9b3c82a0e148b526643994fccfd5e51240fcb" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.452117 5014 scope.go:117] "RemoveContainer" containerID="180e533a1d2765a56b7a3045baba6bb58ebdc2fc0255c2b59590f6dcceff8f00" Oct 06 22:44:01 crc kubenswrapper[5014]: E1006 22:44:01.452943 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"180e533a1d2765a56b7a3045baba6bb58ebdc2fc0255c2b59590f6dcceff8f00\": container with ID starting with 180e533a1d2765a56b7a3045baba6bb58ebdc2fc0255c2b59590f6dcceff8f00 not found: ID does not exist" containerID="180e533a1d2765a56b7a3045baba6bb58ebdc2fc0255c2b59590f6dcceff8f00" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.452988 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"180e533a1d2765a56b7a3045baba6bb58ebdc2fc0255c2b59590f6dcceff8f00"} err="failed to get container status \"180e533a1d2765a56b7a3045baba6bb58ebdc2fc0255c2b59590f6dcceff8f00\": rpc error: code = NotFound desc = could not find container \"180e533a1d2765a56b7a3045baba6bb58ebdc2fc0255c2b59590f6dcceff8f00\": container with ID starting with 180e533a1d2765a56b7a3045baba6bb58ebdc2fc0255c2b59590f6dcceff8f00 not found: ID does not exist" Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.453021 5014 
Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.453021 5014 scope.go:117] "RemoveContainer" containerID="dc33770cd6fd248143d949231a0f237a51790d698a8410d87e73b812f2a7c382"
Oct 06 22:44:01 crc kubenswrapper[5014]: E1006 22:44:01.453392 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc33770cd6fd248143d949231a0f237a51790d698a8410d87e73b812f2a7c382\": container with ID starting with dc33770cd6fd248143d949231a0f237a51790d698a8410d87e73b812f2a7c382 not found: ID does not exist" containerID="dc33770cd6fd248143d949231a0f237a51790d698a8410d87e73b812f2a7c382"
Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.453436 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc33770cd6fd248143d949231a0f237a51790d698a8410d87e73b812f2a7c382"} err="failed to get container status \"dc33770cd6fd248143d949231a0f237a51790d698a8410d87e73b812f2a7c382\": rpc error: code = NotFound desc = could not find container \"dc33770cd6fd248143d949231a0f237a51790d698a8410d87e73b812f2a7c382\": container with ID starting with dc33770cd6fd248143d949231a0f237a51790d698a8410d87e73b812f2a7c382 not found: ID does not exist"
Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.453464 5014 scope.go:117] "RemoveContainer" containerID="16b4b5ff668a9b05717afc2668f9b3c82a0e148b526643994fccfd5e51240fcb"
Oct 06 22:44:01 crc kubenswrapper[5014]: E1006 22:44:01.453834 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16b4b5ff668a9b05717afc2668f9b3c82a0e148b526643994fccfd5e51240fcb\": container with ID starting with 16b4b5ff668a9b05717afc2668f9b3c82a0e148b526643994fccfd5e51240fcb not found: ID does not exist" containerID="16b4b5ff668a9b05717afc2668f9b3c82a0e148b526643994fccfd5e51240fcb"
Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.453874 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16b4b5ff668a9b05717afc2668f9b3c82a0e148b526643994fccfd5e51240fcb"} err="failed to get container status \"16b4b5ff668a9b05717afc2668f9b3c82a0e148b526643994fccfd5e51240fcb\": rpc error: code = NotFound desc = could not find container \"16b4b5ff668a9b05717afc2668f9b3c82a0e148b526643994fccfd5e51240fcb\": container with ID starting with 16b4b5ff668a9b05717afc2668f9b3c82a0e148b526643994fccfd5e51240fcb not found: ID does not exist"
Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.500975 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16c3f436-465f-4853-a45e-c4c42d880bfe" path="/var/lib/kubelet/pods/16c3f436-465f-4853-a45e-c4c42d880bfe/volumes"
Oct 06 22:44:01 crc kubenswrapper[5014]: I1006 22:44:01.502194 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5612ea0-3c83-456e-b578-59f288fdf1b7" path="/var/lib/kubelet/pods/b5612ea0-3c83-456e-b578-59f288fdf1b7/volumes"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.181004 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg"]
Oct 06 22:45:00 crc kubenswrapper[5014]: E1006 22:45:00.183065 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16c3f436-465f-4853-a45e-c4c42d880bfe" containerName="registry-server"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.183103 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="16c3f436-465f-4853-a45e-c4c42d880bfe" containerName="registry-server"
Oct 06 22:45:00 crc kubenswrapper[5014]: E1006 22:45:00.183137 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16c3f436-465f-4853-a45e-c4c42d880bfe" containerName="extract-utilities"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.183157 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="16c3f436-465f-4853-a45e-c4c42d880bfe" containerName="extract-utilities"
Oct 06 22:45:00 crc kubenswrapper[5014]: E1006 22:45:00.183184 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09352ab1-154c-4119-b60b-cedbec798e24" containerName="registry-server"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.183202 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="09352ab1-154c-4119-b60b-cedbec798e24" containerName="registry-server"
Oct 06 22:45:00 crc kubenswrapper[5014]: E1006 22:45:00.183222 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09352ab1-154c-4119-b60b-cedbec798e24" containerName="extract-utilities"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.183237 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="09352ab1-154c-4119-b60b-cedbec798e24" containerName="extract-utilities"
Oct 06 22:45:00 crc kubenswrapper[5014]: E1006 22:45:00.183276 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5612ea0-3c83-456e-b578-59f288fdf1b7" containerName="extract-content"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.183294 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5612ea0-3c83-456e-b578-59f288fdf1b7" containerName="extract-content"
Oct 06 22:45:00 crc kubenswrapper[5014]: E1006 22:45:00.183326 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5612ea0-3c83-456e-b578-59f288fdf1b7" containerName="registry-server"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.183346 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5612ea0-3c83-456e-b578-59f288fdf1b7" containerName="registry-server"
Oct 06 22:45:00 crc kubenswrapper[5014]: E1006 22:45:00.183363 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09352ab1-154c-4119-b60b-cedbec798e24" containerName="extract-content"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.183380 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="09352ab1-154c-4119-b60b-cedbec798e24" containerName="extract-content"
Oct 06 22:45:00 crc kubenswrapper[5014]: E1006 22:45:00.183411 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16c3f436-465f-4853-a45e-c4c42d880bfe" containerName="extract-content"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.183427 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="16c3f436-465f-4853-a45e-c4c42d880bfe" containerName="extract-content"
Oct 06 22:45:00 crc kubenswrapper[5014]: E1006 22:45:00.183471 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5612ea0-3c83-456e-b578-59f288fdf1b7" containerName="extract-utilities"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.183492 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5612ea0-3c83-456e-b578-59f288fdf1b7" containerName="extract-utilities"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.183906 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="09352ab1-154c-4119-b60b-cedbec798e24" containerName="registry-server"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.183947 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5612ea0-3c83-456e-b578-59f288fdf1b7" containerName="registry-server"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.183980 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="16c3f436-465f-4853-a45e-c4c42d880bfe" containerName="registry-server"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.185327 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.188761 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.189119 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.194583 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg"]
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.336547 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cab5b758-f076-4e92-a179-db9f5d764da2-config-volume\") pod \"collect-profiles-29329845-bk6bg\" (UID: \"cab5b758-f076-4e92-a179-db9f5d764da2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.336693 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8kd9\" (UniqueName: \"kubernetes.io/projected/cab5b758-f076-4e92-a179-db9f5d764da2-kube-api-access-x8kd9\") pod \"collect-profiles-29329845-bk6bg\" (UID: \"cab5b758-f076-4e92-a179-db9f5d764da2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.336745 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cab5b758-f076-4e92-a179-db9f5d764da2-secret-volume\") pod \"collect-profiles-29329845-bk6bg\" (UID: \"cab5b758-f076-4e92-a179-db9f5d764da2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.438607 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cab5b758-f076-4e92-a179-db9f5d764da2-config-volume\") pod \"collect-profiles-29329845-bk6bg\" (UID: \"cab5b758-f076-4e92-a179-db9f5d764da2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.438730 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8kd9\" (UniqueName: \"kubernetes.io/projected/cab5b758-f076-4e92-a179-db9f5d764da2-kube-api-access-x8kd9\") pod \"collect-profiles-29329845-bk6bg\" (UID: \"cab5b758-f076-4e92-a179-db9f5d764da2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.438774 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cab5b758-f076-4e92-a179-db9f5d764da2-secret-volume\") pod \"collect-profiles-29329845-bk6bg\" (UID: \"cab5b758-f076-4e92-a179-db9f5d764da2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.440427 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cab5b758-f076-4e92-a179-db9f5d764da2-config-volume\") pod \"collect-profiles-29329845-bk6bg\" (UID: \"cab5b758-f076-4e92-a179-db9f5d764da2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.446462 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cab5b758-f076-4e92-a179-db9f5d764da2-secret-volume\") pod \"collect-profiles-29329845-bk6bg\" (UID: \"cab5b758-f076-4e92-a179-db9f5d764da2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.461892 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8kd9\" (UniqueName: \"kubernetes.io/projected/cab5b758-f076-4e92-a179-db9f5d764da2-kube-api-access-x8kd9\") pod \"collect-profiles-29329845-bk6bg\" (UID: \"cab5b758-f076-4e92-a179-db9f5d764da2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.524924 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg"
Oct 06 22:45:00 crc kubenswrapper[5014]: I1006 22:45:00.842032 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg"]
Oct 06 22:45:01 crc kubenswrapper[5014]: I1006 22:45:01.852596 5014 generic.go:334] "Generic (PLEG): container finished" podID="cab5b758-f076-4e92-a179-db9f5d764da2" containerID="b94dde9f75bc5849329a8f78ff602a357a7494f3691620df725cebe284778437" exitCode=0
Oct 06 22:45:01 crc kubenswrapper[5014]: I1006 22:45:01.852875 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg" event={"ID":"cab5b758-f076-4e92-a179-db9f5d764da2","Type":"ContainerDied","Data":"b94dde9f75bc5849329a8f78ff602a357a7494f3691620df725cebe284778437"}
Oct 06 22:45:01 crc kubenswrapper[5014]: I1006 22:45:01.853155 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg" event={"ID":"cab5b758-f076-4e92-a179-db9f5d764da2","Type":"ContainerStarted","Data":"763fcbabeb618ec7aee2c477aaea30cda7cac87a7bb081ec309713ede2684817"}
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg" Oct 06 22:45:03 crc kubenswrapper[5014]: I1006 22:45:03.307358 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8kd9\" (UniqueName: \"kubernetes.io/projected/cab5b758-f076-4e92-a179-db9f5d764da2-kube-api-access-x8kd9\") pod \"cab5b758-f076-4e92-a179-db9f5d764da2\" (UID: \"cab5b758-f076-4e92-a179-db9f5d764da2\") " Oct 06 22:45:03 crc kubenswrapper[5014]: I1006 22:45:03.307508 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cab5b758-f076-4e92-a179-db9f5d764da2-config-volume\") pod \"cab5b758-f076-4e92-a179-db9f5d764da2\" (UID: \"cab5b758-f076-4e92-a179-db9f5d764da2\") " Oct 06 22:45:03 crc kubenswrapper[5014]: I1006 22:45:03.307546 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cab5b758-f076-4e92-a179-db9f5d764da2-secret-volume\") pod \"cab5b758-f076-4e92-a179-db9f5d764da2\" (UID: \"cab5b758-f076-4e92-a179-db9f5d764da2\") " Oct 06 22:45:03 crc kubenswrapper[5014]: I1006 22:45:03.309915 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cab5b758-f076-4e92-a179-db9f5d764da2-config-volume" (OuterVolumeSpecName: "config-volume") pod "cab5b758-f076-4e92-a179-db9f5d764da2" (UID: "cab5b758-f076-4e92-a179-db9f5d764da2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:45:03 crc kubenswrapper[5014]: I1006 22:45:03.316231 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cab5b758-f076-4e92-a179-db9f5d764da2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cab5b758-f076-4e92-a179-db9f5d764da2" (UID: "cab5b758-f076-4e92-a179-db9f5d764da2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 22:45:03 crc kubenswrapper[5014]: I1006 22:45:03.317062 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cab5b758-f076-4e92-a179-db9f5d764da2-kube-api-access-x8kd9" (OuterVolumeSpecName: "kube-api-access-x8kd9") pod "cab5b758-f076-4e92-a179-db9f5d764da2" (UID: "cab5b758-f076-4e92-a179-db9f5d764da2"). InnerVolumeSpecName "kube-api-access-x8kd9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:45:03 crc kubenswrapper[5014]: I1006 22:45:03.409482 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8kd9\" (UniqueName: \"kubernetes.io/projected/cab5b758-f076-4e92-a179-db9f5d764da2-kube-api-access-x8kd9\") on node \"crc\" DevicePath \"\"" Oct 06 22:45:03 crc kubenswrapper[5014]: I1006 22:45:03.409537 5014 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cab5b758-f076-4e92-a179-db9f5d764da2-config-volume\") on node \"crc\" DevicePath \"\"" Oct 06 22:45:03 crc kubenswrapper[5014]: I1006 22:45:03.409558 5014 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cab5b758-f076-4e92-a179-db9f5d764da2-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 06 22:45:03 crc kubenswrapper[5014]: I1006 22:45:03.878305 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg" event={"ID":"cab5b758-f076-4e92-a179-db9f5d764da2","Type":"ContainerDied","Data":"763fcbabeb618ec7aee2c477aaea30cda7cac87a7bb081ec309713ede2684817"} Oct 06 22:45:03 crc kubenswrapper[5014]: I1006 22:45:03.878377 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="763fcbabeb618ec7aee2c477aaea30cda7cac87a7bb081ec309713ede2684817" Oct 06 22:45:03 crc kubenswrapper[5014]: I1006 22:45:03.878848 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329845-bk6bg" Oct 06 22:45:04 crc kubenswrapper[5014]: I1006 22:45:04.292936 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc"] Oct 06 22:45:04 crc kubenswrapper[5014]: I1006 22:45:04.303390 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329800-96tfc"] Oct 06 22:45:05 crc kubenswrapper[5014]: I1006 22:45:05.500888 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd2b1a65-b02a-423f-b86f-e581dbdb0367" path="/var/lib/kubelet/pods/bd2b1a65-b02a-423f-b86f-e581dbdb0367/volumes" Oct 06 22:45:21 crc kubenswrapper[5014]: I1006 22:45:21.735385 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:45:21 crc kubenswrapper[5014]: I1006 22:45:21.735892 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:45:51 crc kubenswrapper[5014]: I1006 22:45:51.735508 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:45:51 crc kubenswrapper[5014]: I1006 22:45:51.736224 5014 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:46:02 crc kubenswrapper[5014]: I1006 22:46:02.663694 5014 scope.go:117] "RemoveContainer" containerID="0e5ab00455d56f476cd0f04f27664546c93179954790f83d19c18bc3546f1d07" Oct 06 22:46:21 crc kubenswrapper[5014]: I1006 22:46:21.735980 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:46:21 crc kubenswrapper[5014]: I1006 22:46:21.737057 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:46:21 crc kubenswrapper[5014]: I1006 22:46:21.737153 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 22:46:21 crc kubenswrapper[5014]: I1006 22:46:21.738423 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 06 22:46:21 crc kubenswrapper[5014]: I1006 22:46:21.738539 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" gracePeriod=600 Oct 06 22:46:21 crc kubenswrapper[5014]: E1006 22:46:21.872080 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:46:22 crc kubenswrapper[5014]: I1006 22:46:22.665688 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" exitCode=0 Oct 06 22:46:22 crc kubenswrapper[5014]: I1006 22:46:22.665832 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae"} Oct 06 22:46:22 crc kubenswrapper[5014]: I1006 22:46:22.666198 5014 scope.go:117] "RemoveContainer" containerID="e797c99f3d7250575c0fe41e7b21e8ec126de5e18dd0bbd484c427d2d4fa6e36" Oct 06 22:46:22 crc kubenswrapper[5014]: I1006 
22:46:22.666814 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" Oct 06 22:46:22 crc kubenswrapper[5014]: E1006 22:46:22.667152 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:46:37 crc kubenswrapper[5014]: I1006 22:46:37.485376 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" Oct 06 22:46:37 crc kubenswrapper[5014]: E1006 22:46:37.486396 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:46:48 crc kubenswrapper[5014]: I1006 22:46:48.485059 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" Oct 06 22:46:48 crc kubenswrapper[5014]: E1006 22:46:48.486027 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:46:59 crc kubenswrapper[5014]: I1006 22:46:59.485580 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" Oct 06 22:46:59 crc kubenswrapper[5014]: E1006 22:46:59.487454 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:47:13 crc kubenswrapper[5014]: I1006 22:47:13.484755 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" Oct 06 22:47:13 crc kubenswrapper[5014]: E1006 22:47:13.485680 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:47:28 crc kubenswrapper[5014]: I1006 22:47:28.484872 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" Oct 06 22:47:28 crc kubenswrapper[5014]: E1006 22:47:28.486022 
Oct 06 22:47:28 crc kubenswrapper[5014]: E1006 22:47:28.486022 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:47:43 crc kubenswrapper[5014]: I1006 22:47:43.484810 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae"
Oct 06 22:47:43 crc kubenswrapper[5014]: E1006 22:47:43.485588 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:47:57 crc kubenswrapper[5014]: I1006 22:47:57.484952 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae"
Oct 06 22:47:57 crc kubenswrapper[5014]: E1006 22:47:57.486280 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:48:10 crc kubenswrapper[5014]: I1006 22:48:10.485225 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae"
Oct 06 22:48:10 crc kubenswrapper[5014]: E1006 22:48:10.486372 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:48:21 crc kubenswrapper[5014]: I1006 22:48:21.485782 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae"
Oct 06 22:48:21 crc kubenswrapper[5014]: E1006 22:48:21.486833 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:48:33 crc kubenswrapper[5014]: I1006 22:48:33.484886 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae"
Oct 06 22:48:33 crc kubenswrapper[5014]: E1006 22:48:33.486277 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:48:45 crc kubenswrapper[5014]: I1006 22:48:45.491945 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae"
Oct 06 22:48:45 crc kubenswrapper[5014]: E1006 22:48:45.492983 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:49:00 crc kubenswrapper[5014]: I1006 22:49:00.485651 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae"
Oct 06 22:49:00 crc kubenswrapper[5014]: E1006 22:49:00.488719 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:49:15 crc kubenswrapper[5014]: I1006 22:49:15.492660 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae"
Oct 06 22:49:15 crc kubenswrapper[5014]: E1006 22:49:15.499226 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:49:30 crc kubenswrapper[5014]: I1006 22:49:30.484355 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae"
Oct 06 22:49:30 crc kubenswrapper[5014]: E1006 22:49:30.485196 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:49:41 crc kubenswrapper[5014]: I1006 22:49:41.484571 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae"
pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:49:55 crc kubenswrapper[5014]: I1006 22:49:55.501174 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" Oct 06 22:49:55 crc kubenswrapper[5014]: E1006 22:49:55.502066 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:50:08 crc kubenswrapper[5014]: I1006 22:50:08.485474 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" Oct 06 22:50:08 crc kubenswrapper[5014]: E1006 22:50:08.486852 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.027414 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-tsprc"] Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.036392 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-tsprc"] Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.157977 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-95hfk"] Oct 06 22:50:19 crc kubenswrapper[5014]: E1006 22:50:19.158563 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cab5b758-f076-4e92-a179-db9f5d764da2" containerName="collect-profiles" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.158606 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="cab5b758-f076-4e92-a179-db9f5d764da2" containerName="collect-profiles" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.159052 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="cab5b758-f076-4e92-a179-db9f5d764da2" containerName="collect-profiles" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.160073 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-95hfk" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.163767 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.164474 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.164556 5014 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-vrc8d" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.168337 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.170374 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-95hfk"] Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.239300 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2bdca7c7-f000-45ff-8bf0-099941895915-node-mnt\") pod \"crc-storage-crc-95hfk\" (UID: \"2bdca7c7-f000-45ff-8bf0-099941895915\") " pod="crc-storage/crc-storage-crc-95hfk" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.239599 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8lk4\" (UniqueName: \"kubernetes.io/projected/2bdca7c7-f000-45ff-8bf0-099941895915-kube-api-access-p8lk4\") pod \"crc-storage-crc-95hfk\" (UID: \"2bdca7c7-f000-45ff-8bf0-099941895915\") " pod="crc-storage/crc-storage-crc-95hfk" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.239703 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2bdca7c7-f000-45ff-8bf0-099941895915-crc-storage\") pod \"crc-storage-crc-95hfk\" (UID: \"2bdca7c7-f000-45ff-8bf0-099941895915\") " pod="crc-storage/crc-storage-crc-95hfk" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.341297 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8lk4\" (UniqueName: \"kubernetes.io/projected/2bdca7c7-f000-45ff-8bf0-099941895915-kube-api-access-p8lk4\") pod \"crc-storage-crc-95hfk\" (UID: \"2bdca7c7-f000-45ff-8bf0-099941895915\") " pod="crc-storage/crc-storage-crc-95hfk" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.341370 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2bdca7c7-f000-45ff-8bf0-099941895915-crc-storage\") pod \"crc-storage-crc-95hfk\" (UID: \"2bdca7c7-f000-45ff-8bf0-099941895915\") " pod="crc-storage/crc-storage-crc-95hfk" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.341446 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2bdca7c7-f000-45ff-8bf0-099941895915-node-mnt\") pod \"crc-storage-crc-95hfk\" (UID: \"2bdca7c7-f000-45ff-8bf0-099941895915\") " pod="crc-storage/crc-storage-crc-95hfk" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.341786 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2bdca7c7-f000-45ff-8bf0-099941895915-node-mnt\") pod \"crc-storage-crc-95hfk\" (UID: \"2bdca7c7-f000-45ff-8bf0-099941895915\") " 
pod="crc-storage/crc-storage-crc-95hfk" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.342881 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2bdca7c7-f000-45ff-8bf0-099941895915-crc-storage\") pod \"crc-storage-crc-95hfk\" (UID: \"2bdca7c7-f000-45ff-8bf0-099941895915\") " pod="crc-storage/crc-storage-crc-95hfk" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.375010 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8lk4\" (UniqueName: \"kubernetes.io/projected/2bdca7c7-f000-45ff-8bf0-099941895915-kube-api-access-p8lk4\") pod \"crc-storage-crc-95hfk\" (UID: \"2bdca7c7-f000-45ff-8bf0-099941895915\") " pod="crc-storage/crc-storage-crc-95hfk" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.481717 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-95hfk" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.503452 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5447fba0-38f4-4e6e-b891-f44e97259e21" path="/var/lib/kubelet/pods/5447fba0-38f4-4e6e-b891-f44e97259e21/volumes" Oct 06 22:50:19 crc kubenswrapper[5014]: I1006 22:50:19.960907 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-95hfk"] Oct 06 22:50:20 crc kubenswrapper[5014]: I1006 22:50:20.118801 5014 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 06 22:50:20 crc kubenswrapper[5014]: I1006 22:50:20.894459 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-95hfk" event={"ID":"2bdca7c7-f000-45ff-8bf0-099941895915","Type":"ContainerStarted","Data":"f3a2fa6a0f6ff803b4b8a2df4af49114abae43d8e09782610e2afac9466e7619"} Oct 06 22:50:21 crc kubenswrapper[5014]: I1006 22:50:21.485016 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" Oct 06 22:50:21 crc kubenswrapper[5014]: E1006 22:50:21.489372 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:50:21 crc kubenswrapper[5014]: I1006 22:50:21.906272 5014 generic.go:334] "Generic (PLEG): container finished" podID="2bdca7c7-f000-45ff-8bf0-099941895915" containerID="e7a7316e2c8877d1734a014323f6fea5c478a06c0b8281a9a9e8a4fb416a74fe" exitCode=0 Oct 06 22:50:21 crc kubenswrapper[5014]: I1006 22:50:21.906333 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-95hfk" event={"ID":"2bdca7c7-f000-45ff-8bf0-099941895915","Type":"ContainerDied","Data":"e7a7316e2c8877d1734a014323f6fea5c478a06c0b8281a9a9e8a4fb416a74fe"} Oct 06 22:50:23 crc kubenswrapper[5014]: I1006 22:50:23.303484 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-95hfk" Oct 06 22:50:23 crc kubenswrapper[5014]: I1006 22:50:23.505033 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2bdca7c7-f000-45ff-8bf0-099941895915-node-mnt\") pod \"2bdca7c7-f000-45ff-8bf0-099941895915\" (UID: \"2bdca7c7-f000-45ff-8bf0-099941895915\") " Oct 06 22:50:23 crc kubenswrapper[5014]: I1006 22:50:23.505133 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2bdca7c7-f000-45ff-8bf0-099941895915-crc-storage\") pod \"2bdca7c7-f000-45ff-8bf0-099941895915\" (UID: \"2bdca7c7-f000-45ff-8bf0-099941895915\") " Oct 06 22:50:23 crc kubenswrapper[5014]: I1006 22:50:23.505170 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8lk4\" (UniqueName: \"kubernetes.io/projected/2bdca7c7-f000-45ff-8bf0-099941895915-kube-api-access-p8lk4\") pod \"2bdca7c7-f000-45ff-8bf0-099941895915\" (UID: \"2bdca7c7-f000-45ff-8bf0-099941895915\") " Oct 06 22:50:23 crc kubenswrapper[5014]: I1006 22:50:23.508224 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2bdca7c7-f000-45ff-8bf0-099941895915-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "2bdca7c7-f000-45ff-8bf0-099941895915" (UID: "2bdca7c7-f000-45ff-8bf0-099941895915"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 22:50:23 crc kubenswrapper[5014]: I1006 22:50:23.517975 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bdca7c7-f000-45ff-8bf0-099941895915-kube-api-access-p8lk4" (OuterVolumeSpecName: "kube-api-access-p8lk4") pod "2bdca7c7-f000-45ff-8bf0-099941895915" (UID: "2bdca7c7-f000-45ff-8bf0-099941895915"). InnerVolumeSpecName "kube-api-access-p8lk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:50:23 crc kubenswrapper[5014]: I1006 22:50:23.545549 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2bdca7c7-f000-45ff-8bf0-099941895915-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "2bdca7c7-f000-45ff-8bf0-099941895915" (UID: "2bdca7c7-f000-45ff-8bf0-099941895915"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:50:23 crc kubenswrapper[5014]: I1006 22:50:23.606346 5014 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2bdca7c7-f000-45ff-8bf0-099941895915-crc-storage\") on node \"crc\" DevicePath \"\"" Oct 06 22:50:23 crc kubenswrapper[5014]: I1006 22:50:23.606379 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8lk4\" (UniqueName: \"kubernetes.io/projected/2bdca7c7-f000-45ff-8bf0-099941895915-kube-api-access-p8lk4\") on node \"crc\" DevicePath \"\"" Oct 06 22:50:23 crc kubenswrapper[5014]: I1006 22:50:23.606392 5014 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2bdca7c7-f000-45ff-8bf0-099941895915-node-mnt\") on node \"crc\" DevicePath \"\"" Oct 06 22:50:23 crc kubenswrapper[5014]: I1006 22:50:23.928188 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-95hfk" event={"ID":"2bdca7c7-f000-45ff-8bf0-099941895915","Type":"ContainerDied","Data":"f3a2fa6a0f6ff803b4b8a2df4af49114abae43d8e09782610e2afac9466e7619"} Oct 06 22:50:23 crc kubenswrapper[5014]: I1006 22:50:23.928245 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-95hfk" Oct 06 22:50:23 crc kubenswrapper[5014]: I1006 22:50:23.928247 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f3a2fa6a0f6ff803b4b8a2df4af49114abae43d8e09782610e2afac9466e7619" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.620825 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-95hfk"] Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.627068 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-95hfk"] Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.754121 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-9rcns"] Oct 06 22:50:25 crc kubenswrapper[5014]: E1006 22:50:25.754518 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bdca7c7-f000-45ff-8bf0-099941895915" containerName="storage" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.754542 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bdca7c7-f000-45ff-8bf0-099941895915" containerName="storage" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.754763 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bdca7c7-f000-45ff-8bf0-099941895915" containerName="storage" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.755416 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-9rcns" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.758466 5014 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-vrc8d" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.758487 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.759465 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.759845 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.771400 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-9rcns"] Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.844290 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/865c6998-861a-4ec0-a4d6-6686f7867c81-crc-storage\") pod \"crc-storage-crc-9rcns\" (UID: \"865c6998-861a-4ec0-a4d6-6686f7867c81\") " pod="crc-storage/crc-storage-crc-9rcns" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.844563 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkn5b\" (UniqueName: \"kubernetes.io/projected/865c6998-861a-4ec0-a4d6-6686f7867c81-kube-api-access-fkn5b\") pod \"crc-storage-crc-9rcns\" (UID: \"865c6998-861a-4ec0-a4d6-6686f7867c81\") " pod="crc-storage/crc-storage-crc-9rcns" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.844996 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/865c6998-861a-4ec0-a4d6-6686f7867c81-node-mnt\") pod \"crc-storage-crc-9rcns\" (UID: \"865c6998-861a-4ec0-a4d6-6686f7867c81\") " pod="crc-storage/crc-storage-crc-9rcns" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.947162 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/865c6998-861a-4ec0-a4d6-6686f7867c81-node-mnt\") pod \"crc-storage-crc-9rcns\" (UID: \"865c6998-861a-4ec0-a4d6-6686f7867c81\") " pod="crc-storage/crc-storage-crc-9rcns" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.947245 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/865c6998-861a-4ec0-a4d6-6686f7867c81-crc-storage\") pod \"crc-storage-crc-9rcns\" (UID: \"865c6998-861a-4ec0-a4d6-6686f7867c81\") " pod="crc-storage/crc-storage-crc-9rcns" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.947314 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkn5b\" (UniqueName: \"kubernetes.io/projected/865c6998-861a-4ec0-a4d6-6686f7867c81-kube-api-access-fkn5b\") pod \"crc-storage-crc-9rcns\" (UID: \"865c6998-861a-4ec0-a4d6-6686f7867c81\") " pod="crc-storage/crc-storage-crc-9rcns" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.947686 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/865c6998-861a-4ec0-a4d6-6686f7867c81-node-mnt\") pod \"crc-storage-crc-9rcns\" (UID: \"865c6998-861a-4ec0-a4d6-6686f7867c81\") " 
pod="crc-storage/crc-storage-crc-9rcns" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.948337 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/865c6998-861a-4ec0-a4d6-6686f7867c81-crc-storage\") pod \"crc-storage-crc-9rcns\" (UID: \"865c6998-861a-4ec0-a4d6-6686f7867c81\") " pod="crc-storage/crc-storage-crc-9rcns" Oct 06 22:50:25 crc kubenswrapper[5014]: I1006 22:50:25.976466 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkn5b\" (UniqueName: \"kubernetes.io/projected/865c6998-861a-4ec0-a4d6-6686f7867c81-kube-api-access-fkn5b\") pod \"crc-storage-crc-9rcns\" (UID: \"865c6998-861a-4ec0-a4d6-6686f7867c81\") " pod="crc-storage/crc-storage-crc-9rcns" Oct 06 22:50:26 crc kubenswrapper[5014]: I1006 22:50:26.086343 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-9rcns" Oct 06 22:50:26 crc kubenswrapper[5014]: I1006 22:50:26.336661 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-9rcns"] Oct 06 22:50:26 crc kubenswrapper[5014]: I1006 22:50:26.961681 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-9rcns" event={"ID":"865c6998-861a-4ec0-a4d6-6686f7867c81","Type":"ContainerStarted","Data":"e399a6a18e9f6dff9a93afcac19511d0513ae60e834b56d2c348a4e63624c52c"} Oct 06 22:50:27 crc kubenswrapper[5014]: I1006 22:50:27.503276 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bdca7c7-f000-45ff-8bf0-099941895915" path="/var/lib/kubelet/pods/2bdca7c7-f000-45ff-8bf0-099941895915/volumes" Oct 06 22:50:27 crc kubenswrapper[5014]: I1006 22:50:27.972822 5014 generic.go:334] "Generic (PLEG): container finished" podID="865c6998-861a-4ec0-a4d6-6686f7867c81" containerID="37d7395f5af876dd7ff4c0bc59ac9fe6deed90740d7d53cd685cb90787f3de10" exitCode=0 Oct 06 22:50:27 crc kubenswrapper[5014]: I1006 22:50:27.973057 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-9rcns" event={"ID":"865c6998-861a-4ec0-a4d6-6686f7867c81","Type":"ContainerDied","Data":"37d7395f5af876dd7ff4c0bc59ac9fe6deed90740d7d53cd685cb90787f3de10"} Oct 06 22:50:29 crc kubenswrapper[5014]: I1006 22:50:29.341231 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-9rcns" Oct 06 22:50:29 crc kubenswrapper[5014]: I1006 22:50:29.508305 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fkn5b\" (UniqueName: \"kubernetes.io/projected/865c6998-861a-4ec0-a4d6-6686f7867c81-kube-api-access-fkn5b\") pod \"865c6998-861a-4ec0-a4d6-6686f7867c81\" (UID: \"865c6998-861a-4ec0-a4d6-6686f7867c81\") " Oct 06 22:50:29 crc kubenswrapper[5014]: I1006 22:50:29.508401 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/865c6998-861a-4ec0-a4d6-6686f7867c81-node-mnt\") pod \"865c6998-861a-4ec0-a4d6-6686f7867c81\" (UID: \"865c6998-861a-4ec0-a4d6-6686f7867c81\") " Oct 06 22:50:29 crc kubenswrapper[5014]: I1006 22:50:29.508608 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/865c6998-861a-4ec0-a4d6-6686f7867c81-crc-storage\") pod \"865c6998-861a-4ec0-a4d6-6686f7867c81\" (UID: \"865c6998-861a-4ec0-a4d6-6686f7867c81\") " Oct 06 22:50:29 crc kubenswrapper[5014]: I1006 22:50:29.508937 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/865c6998-861a-4ec0-a4d6-6686f7867c81-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "865c6998-861a-4ec0-a4d6-6686f7867c81" (UID: "865c6998-861a-4ec0-a4d6-6686f7867c81"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 06 22:50:29 crc kubenswrapper[5014]: I1006 22:50:29.509129 5014 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/865c6998-861a-4ec0-a4d6-6686f7867c81-node-mnt\") on node \"crc\" DevicePath \"\"" Oct 06 22:50:29 crc kubenswrapper[5014]: I1006 22:50:29.516692 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/865c6998-861a-4ec0-a4d6-6686f7867c81-kube-api-access-fkn5b" (OuterVolumeSpecName: "kube-api-access-fkn5b") pod "865c6998-861a-4ec0-a4d6-6686f7867c81" (UID: "865c6998-861a-4ec0-a4d6-6686f7867c81"). InnerVolumeSpecName "kube-api-access-fkn5b". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:50:29 crc kubenswrapper[5014]: I1006 22:50:29.543791 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/865c6998-861a-4ec0-a4d6-6686f7867c81-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "865c6998-861a-4ec0-a4d6-6686f7867c81" (UID: "865c6998-861a-4ec0-a4d6-6686f7867c81"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:50:29 crc kubenswrapper[5014]: I1006 22:50:29.611165 5014 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/865c6998-861a-4ec0-a4d6-6686f7867c81-crc-storage\") on node \"crc\" DevicePath \"\"" Oct 06 22:50:29 crc kubenswrapper[5014]: I1006 22:50:29.611487 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fkn5b\" (UniqueName: \"kubernetes.io/projected/865c6998-861a-4ec0-a4d6-6686f7867c81-kube-api-access-fkn5b\") on node \"crc\" DevicePath \"\"" Oct 06 22:50:29 crc kubenswrapper[5014]: I1006 22:50:29.992157 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-9rcns" event={"ID":"865c6998-861a-4ec0-a4d6-6686f7867c81","Type":"ContainerDied","Data":"e399a6a18e9f6dff9a93afcac19511d0513ae60e834b56d2c348a4e63624c52c"} Oct 06 22:50:29 crc kubenswrapper[5014]: I1006 22:50:29.992228 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e399a6a18e9f6dff9a93afcac19511d0513ae60e834b56d2c348a4e63624c52c" Oct 06 22:50:29 crc kubenswrapper[5014]: I1006 22:50:29.992312 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-9rcns" Oct 06 22:50:35 crc kubenswrapper[5014]: I1006 22:50:35.493006 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" Oct 06 22:50:35 crc kubenswrapper[5014]: E1006 22:50:35.494515 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:50:49 crc kubenswrapper[5014]: I1006 22:50:49.485228 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" Oct 06 22:50:49 crc kubenswrapper[5014]: E1006 22:50:49.486420 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:51:01 crc kubenswrapper[5014]: I1006 22:51:01.486435 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" Oct 06 22:51:01 crc kubenswrapper[5014]: E1006 22:51:01.487435 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:51:02 crc kubenswrapper[5014]: I1006 22:51:02.787738 5014 scope.go:117] "RemoveContainer" containerID="29ced8a93e901b500a5cc3c9a2015cf2a739b1a3ccd388d15c7a9a1f68a2b548" Oct 06 22:51:12 crc kubenswrapper[5014]: I1006 
22:51:12.484467 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" Oct 06 22:51:12 crc kubenswrapper[5014]: E1006 22:51:12.486773 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:51:26 crc kubenswrapper[5014]: I1006 22:51:26.485677 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae" Oct 06 22:51:27 crc kubenswrapper[5014]: I1006 22:51:27.538152 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"f1b2dba782986714b78842b612ab901075cbfeaf1e1971b541c84d452a74bcc2"} Oct 06 22:52:25 crc kubenswrapper[5014]: I1006 22:52:25.917429 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b8f87f5c5-nrdbt"] Oct 06 22:52:25 crc kubenswrapper[5014]: E1006 22:52:25.918419 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="865c6998-861a-4ec0-a4d6-6686f7867c81" containerName="storage" Oct 06 22:52:25 crc kubenswrapper[5014]: I1006 22:52:25.918438 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="865c6998-861a-4ec0-a4d6-6686f7867c81" containerName="storage" Oct 06 22:52:25 crc kubenswrapper[5014]: I1006 22:52:25.918646 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="865c6998-861a-4ec0-a4d6-6686f7867c81" containerName="storage" Oct 06 22:52:25 crc kubenswrapper[5014]: I1006 22:52:25.919602 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" Oct 06 22:52:25 crc kubenswrapper[5014]: I1006 22:52:25.921567 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-678578b8df-5br5p"] Oct 06 22:52:25 crc kubenswrapper[5014]: I1006 22:52:25.922045 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-j66br" Oct 06 22:52:25 crc kubenswrapper[5014]: I1006 22:52:25.922529 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Oct 06 22:52:25 crc kubenswrapper[5014]: I1006 22:52:25.922636 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-678578b8df-5br5p" Oct 06 22:52:25 crc kubenswrapper[5014]: I1006 22:52:25.922720 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Oct 06 22:52:25 crc kubenswrapper[5014]: I1006 22:52:25.923320 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Oct 06 22:52:25 crc kubenswrapper[5014]: I1006 22:52:25.925900 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Oct 06 22:52:25 crc kubenswrapper[5014]: I1006 22:52:25.935412 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b8f87f5c5-nrdbt"] Oct 06 22:52:25 crc kubenswrapper[5014]: I1006 22:52:25.939735 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-678578b8df-5br5p"] Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.048912 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-config\") pod \"dnsmasq-dns-6b8f87f5c5-nrdbt\" (UID: \"839cc0a2-d1c3-46ee-89ac-18a96b92fee3\") " pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.048963 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvzvg\" (UniqueName: \"kubernetes.io/projected/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-kube-api-access-zvzvg\") pod \"dnsmasq-dns-6b8f87f5c5-nrdbt\" (UID: \"839cc0a2-d1c3-46ee-89ac-18a96b92fee3\") " pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.048984 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f26e5875-7ddb-4406-a0c4-9f8aa0b810f4-config\") pod \"dnsmasq-dns-678578b8df-5br5p\" (UID: \"f26e5875-7ddb-4406-a0c4-9f8aa0b810f4\") " pod="openstack/dnsmasq-dns-678578b8df-5br5p" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.049005 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-dns-svc\") pod \"dnsmasq-dns-6b8f87f5c5-nrdbt\" (UID: \"839cc0a2-d1c3-46ee-89ac-18a96b92fee3\") " pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.049024 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvmht\" (UniqueName: \"kubernetes.io/projected/f26e5875-7ddb-4406-a0c4-9f8aa0b810f4-kube-api-access-qvmht\") pod \"dnsmasq-dns-678578b8df-5br5p\" (UID: \"f26e5875-7ddb-4406-a0c4-9f8aa0b810f4\") " pod="openstack/dnsmasq-dns-678578b8df-5br5p" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.149988 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-config\") pod \"dnsmasq-dns-6b8f87f5c5-nrdbt\" (UID: \"839cc0a2-d1c3-46ee-89ac-18a96b92fee3\") " pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.150046 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvzvg\" (UniqueName: \"kubernetes.io/projected/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-kube-api-access-zvzvg\") 
pod \"dnsmasq-dns-6b8f87f5c5-nrdbt\" (UID: \"839cc0a2-d1c3-46ee-89ac-18a96b92fee3\") " pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.150064 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f26e5875-7ddb-4406-a0c4-9f8aa0b810f4-config\") pod \"dnsmasq-dns-678578b8df-5br5p\" (UID: \"f26e5875-7ddb-4406-a0c4-9f8aa0b810f4\") " pod="openstack/dnsmasq-dns-678578b8df-5br5p" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.150086 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-dns-svc\") pod \"dnsmasq-dns-6b8f87f5c5-nrdbt\" (UID: \"839cc0a2-d1c3-46ee-89ac-18a96b92fee3\") " pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.150105 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvmht\" (UniqueName: \"kubernetes.io/projected/f26e5875-7ddb-4406-a0c4-9f8aa0b810f4-kube-api-access-qvmht\") pod \"dnsmasq-dns-678578b8df-5br5p\" (UID: \"f26e5875-7ddb-4406-a0c4-9f8aa0b810f4\") " pod="openstack/dnsmasq-dns-678578b8df-5br5p" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.151398 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f26e5875-7ddb-4406-a0c4-9f8aa0b810f4-config\") pod \"dnsmasq-dns-678578b8df-5br5p\" (UID: \"f26e5875-7ddb-4406-a0c4-9f8aa0b810f4\") " pod="openstack/dnsmasq-dns-678578b8df-5br5p" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.151522 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-dns-svc\") pod \"dnsmasq-dns-6b8f87f5c5-nrdbt\" (UID: \"839cc0a2-d1c3-46ee-89ac-18a96b92fee3\") " pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.151581 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-config\") pod \"dnsmasq-dns-6b8f87f5c5-nrdbt\" (UID: \"839cc0a2-d1c3-46ee-89ac-18a96b92fee3\") " pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.170992 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvzvg\" (UniqueName: \"kubernetes.io/projected/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-kube-api-access-zvzvg\") pod \"dnsmasq-dns-6b8f87f5c5-nrdbt\" (UID: \"839cc0a2-d1c3-46ee-89ac-18a96b92fee3\") " pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.171531 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvmht\" (UniqueName: \"kubernetes.io/projected/f26e5875-7ddb-4406-a0c4-9f8aa0b810f4-kube-api-access-qvmht\") pod \"dnsmasq-dns-678578b8df-5br5p\" (UID: \"f26e5875-7ddb-4406-a0c4-9f8aa0b810f4\") " pod="openstack/dnsmasq-dns-678578b8df-5br5p" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.246328 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.255975 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-678578b8df-5br5p" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.351086 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-678578b8df-5br5p"] Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.370371 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85f98b87f9-htpmq"] Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.371601 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.375501 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85f98b87f9-htpmq"] Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.456170 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wpc6\" (UniqueName: \"kubernetes.io/projected/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-kube-api-access-7wpc6\") pod \"dnsmasq-dns-85f98b87f9-htpmq\" (UID: \"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3\") " pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.456223 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-dns-svc\") pod \"dnsmasq-dns-85f98b87f9-htpmq\" (UID: \"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3\") " pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.456279 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-config\") pod \"dnsmasq-dns-85f98b87f9-htpmq\" (UID: \"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3\") " pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.557598 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wpc6\" (UniqueName: \"kubernetes.io/projected/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-kube-api-access-7wpc6\") pod \"dnsmasq-dns-85f98b87f9-htpmq\" (UID: \"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3\") " pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.557667 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-dns-svc\") pod \"dnsmasq-dns-85f98b87f9-htpmq\" (UID: \"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3\") " pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.557720 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-config\") pod \"dnsmasq-dns-85f98b87f9-htpmq\" (UID: \"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3\") " pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.558535 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-config\") pod \"dnsmasq-dns-85f98b87f9-htpmq\" (UID: \"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3\") " pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.559304 5014 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-dns-svc\") pod \"dnsmasq-dns-85f98b87f9-htpmq\" (UID: \"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3\") " pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.588720 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wpc6\" (UniqueName: \"kubernetes.io/projected/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-kube-api-access-7wpc6\") pod \"dnsmasq-dns-85f98b87f9-htpmq\" (UID: \"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3\") " pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.656413 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b8f87f5c5-nrdbt"] Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.677025 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67d9f7fb89-2rzgz"] Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.678238 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.693116 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.694359 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67d9f7fb89-2rzgz"] Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.807942 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b8f87f5c5-nrdbt"] Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.861277 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bdd26dc8-c018-4079-9038-ca724382fc8f-config\") pod \"dnsmasq-dns-67d9f7fb89-2rzgz\" (UID: \"bdd26dc8-c018-4079-9038-ca724382fc8f\") " pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.861322 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mt92s\" (UniqueName: \"kubernetes.io/projected/bdd26dc8-c018-4079-9038-ca724382fc8f-kube-api-access-mt92s\") pod \"dnsmasq-dns-67d9f7fb89-2rzgz\" (UID: \"bdd26dc8-c018-4079-9038-ca724382fc8f\") " pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.861345 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bdd26dc8-c018-4079-9038-ca724382fc8f-dns-svc\") pod \"dnsmasq-dns-67d9f7fb89-2rzgz\" (UID: \"bdd26dc8-c018-4079-9038-ca724382fc8f\") " pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.899861 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-678578b8df-5br5p"] Oct 06 22:52:26 crc kubenswrapper[5014]: W1006 22:52:26.910231 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf26e5875_7ddb_4406_a0c4_9f8aa0b810f4.slice/crio-9d03e19299385c4efddba8e093cb16ae8e13e718f9565e77e97ef4b96eae79ce WatchSource:0}: Error finding container 9d03e19299385c4efddba8e093cb16ae8e13e718f9565e77e97ef4b96eae79ce: Status 404 returned error can't find the container 
with id 9d03e19299385c4efddba8e093cb16ae8e13e718f9565e77e97ef4b96eae79ce Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.962520 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bdd26dc8-c018-4079-9038-ca724382fc8f-dns-svc\") pod \"dnsmasq-dns-67d9f7fb89-2rzgz\" (UID: \"bdd26dc8-c018-4079-9038-ca724382fc8f\") " pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.962654 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bdd26dc8-c018-4079-9038-ca724382fc8f-config\") pod \"dnsmasq-dns-67d9f7fb89-2rzgz\" (UID: \"bdd26dc8-c018-4079-9038-ca724382fc8f\") " pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.962680 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mt92s\" (UniqueName: \"kubernetes.io/projected/bdd26dc8-c018-4079-9038-ca724382fc8f-kube-api-access-mt92s\") pod \"dnsmasq-dns-67d9f7fb89-2rzgz\" (UID: \"bdd26dc8-c018-4079-9038-ca724382fc8f\") " pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.963475 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bdd26dc8-c018-4079-9038-ca724382fc8f-dns-svc\") pod \"dnsmasq-dns-67d9f7fb89-2rzgz\" (UID: \"bdd26dc8-c018-4079-9038-ca724382fc8f\") " pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" Oct 06 22:52:26 crc kubenswrapper[5014]: I1006 22:52:26.964438 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bdd26dc8-c018-4079-9038-ca724382fc8f-config\") pod \"dnsmasq-dns-67d9f7fb89-2rzgz\" (UID: \"bdd26dc8-c018-4079-9038-ca724382fc8f\") " pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:26.999661 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mt92s\" (UniqueName: \"kubernetes.io/projected/bdd26dc8-c018-4079-9038-ca724382fc8f-kube-api-access-mt92s\") pod \"dnsmasq-dns-67d9f7fb89-2rzgz\" (UID: \"bdd26dc8-c018-4079-9038-ca724382fc8f\") " pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.000863 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.124251 5014 generic.go:334] "Generic (PLEG): container finished" podID="839cc0a2-d1c3-46ee-89ac-18a96b92fee3" containerID="98700e264b7f5ca01349824c72895f153e03d6971c1b0f9fa1d269e01d3b1dbe" exitCode=0 Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.124339 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" event={"ID":"839cc0a2-d1c3-46ee-89ac-18a96b92fee3","Type":"ContainerDied","Data":"98700e264b7f5ca01349824c72895f153e03d6971c1b0f9fa1d269e01d3b1dbe"} Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.124379 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" event={"ID":"839cc0a2-d1c3-46ee-89ac-18a96b92fee3","Type":"ContainerStarted","Data":"8f4dfd0caa61d773c269b8d2349867412d78c5f8ee282c292c3096a46bc7cca2"} Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.139122 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-678578b8df-5br5p" event={"ID":"f26e5875-7ddb-4406-a0c4-9f8aa0b810f4","Type":"ContainerStarted","Data":"9d03e19299385c4efddba8e093cb16ae8e13e718f9565e77e97ef4b96eae79ce"} Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.139267 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-678578b8df-5br5p" podUID="f26e5875-7ddb-4406-a0c4-9f8aa0b810f4" containerName="init" containerID="cri-o://155d4e8fb6cf8bfec9b90df39a2ad409363e1285e1eb7959a43736526d174db2" gracePeriod=10 Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.160969 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85f98b87f9-htpmq"] Oct 06 22:52:27 crc kubenswrapper[5014]: W1006 22:52:27.199262 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod544b38ec_006e_4f9a_8ce4_dbe28d3df2e3.slice/crio-26b8b1bb687975528c0573dadf3a896c667264527fff01042952eae90cc71d46 WatchSource:0}: Error finding container 26b8b1bb687975528c0573dadf3a896c667264527fff01042952eae90cc71d46: Status 404 returned error can't find the container with id 26b8b1bb687975528c0573dadf3a896c667264527fff01042952eae90cc71d46 Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.528517 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.531954 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Oct 06 22:52:27 crc kubenswrapper[5014]: E1006 22:52:27.532282 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="839cc0a2-d1c3-46ee-89ac-18a96b92fee3" containerName="init" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.532305 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="839cc0a2-d1c3-46ee-89ac-18a96b92fee3" containerName="init" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.532474 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="839cc0a2-d1c3-46ee-89ac-18a96b92fee3" containerName="init" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.533332 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.536091 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.536262 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.536374 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.536477 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.536577 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-mmvsr" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.538541 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.538752 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.542403 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-678578b8df-5br5p" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.547901 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.599061 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67d9f7fb89-2rzgz"] Oct 06 22:52:27 crc kubenswrapper[5014]: W1006 22:52:27.599642 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbdd26dc8_c018_4079_9038_ca724382fc8f.slice/crio-23baef8d4309a62c8f0773566eeb02da086f8c75d07af31b9d34389bae9cc172 WatchSource:0}: Error finding container 23baef8d4309a62c8f0773566eeb02da086f8c75d07af31b9d34389bae9cc172: Status 404 returned error can't find the container with id 23baef8d4309a62c8f0773566eeb02da086f8c75d07af31b9d34389bae9cc172 Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.671387 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qvmht\" (UniqueName: \"kubernetes.io/projected/f26e5875-7ddb-4406-a0c4-9f8aa0b810f4-kube-api-access-qvmht\") pod \"f26e5875-7ddb-4406-a0c4-9f8aa0b810f4\" (UID: \"f26e5875-7ddb-4406-a0c4-9f8aa0b810f4\") " Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.671467 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-config\") pod \"839cc0a2-d1c3-46ee-89ac-18a96b92fee3\" (UID: \"839cc0a2-d1c3-46ee-89ac-18a96b92fee3\") " Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.671576 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f26e5875-7ddb-4406-a0c4-9f8aa0b810f4-config\") pod \"f26e5875-7ddb-4406-a0c4-9f8aa0b810f4\" (UID: \"f26e5875-7ddb-4406-a0c4-9f8aa0b810f4\") " Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.671687 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvzvg\" (UniqueName: 
\"kubernetes.io/projected/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-kube-api-access-zvzvg\") pod \"839cc0a2-d1c3-46ee-89ac-18a96b92fee3\" (UID: \"839cc0a2-d1c3-46ee-89ac-18a96b92fee3\") " Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.671737 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-dns-svc\") pod \"839cc0a2-d1c3-46ee-89ac-18a96b92fee3\" (UID: \"839cc0a2-d1c3-46ee-89ac-18a96b92fee3\") " Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.671938 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.672010 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d6fe3a81-97ea-4544-8747-edfe60d5ba74-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.672062 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.672091 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d6fe3a81-97ea-4544-8747-edfe60d5ba74-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.672107 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.672393 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.672480 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-de504b32-6a0f-469c-acf1-4778995e167d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de504b32-6a0f-469c-acf1-4778995e167d\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.672516 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-plugins-conf\") 
pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.672539 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-config-data\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.672556 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.672635 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbrhz\" (UniqueName: \"kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-kube-api-access-tbrhz\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.675060 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f26e5875-7ddb-4406-a0c4-9f8aa0b810f4-kube-api-access-qvmht" (OuterVolumeSpecName: "kube-api-access-qvmht") pod "f26e5875-7ddb-4406-a0c4-9f8aa0b810f4" (UID: "f26e5875-7ddb-4406-a0c4-9f8aa0b810f4"). InnerVolumeSpecName "kube-api-access-qvmht". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.675586 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-kube-api-access-zvzvg" (OuterVolumeSpecName: "kube-api-access-zvzvg") pod "839cc0a2-d1c3-46ee-89ac-18a96b92fee3" (UID: "839cc0a2-d1c3-46ee-89ac-18a96b92fee3"). InnerVolumeSpecName "kube-api-access-zvzvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.689153 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "839cc0a2-d1c3-46ee-89ac-18a96b92fee3" (UID: "839cc0a2-d1c3-46ee-89ac-18a96b92fee3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.692651 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-config" (OuterVolumeSpecName: "config") pod "839cc0a2-d1c3-46ee-89ac-18a96b92fee3" (UID: "839cc0a2-d1c3-46ee-89ac-18a96b92fee3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.696753 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f26e5875-7ddb-4406-a0c4-9f8aa0b810f4-config" (OuterVolumeSpecName: "config") pod "f26e5875-7ddb-4406-a0c4-9f8aa0b810f4" (UID: "f26e5875-7ddb-4406-a0c4-9f8aa0b810f4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774011 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774062 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-config-data\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774097 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbrhz\" (UniqueName: \"kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-kube-api-access-tbrhz\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774134 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774188 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d6fe3a81-97ea-4544-8747-edfe60d5ba74-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774225 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774259 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d6fe3a81-97ea-4544-8747-edfe60d5ba74-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774280 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774311 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774347 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-de504b32-6a0f-469c-acf1-4778995e167d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de504b32-6a0f-469c-acf1-4778995e167d\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774377 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774438 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qvmht\" (UniqueName: \"kubernetes.io/projected/f26e5875-7ddb-4406-a0c4-9f8aa0b810f4-kube-api-access-qvmht\") on node \"crc\" DevicePath \"\"" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774461 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-config\") on node \"crc\" DevicePath \"\"" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774477 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f26e5875-7ddb-4406-a0c4-9f8aa0b810f4-config\") on node \"crc\" DevicePath \"\"" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774495 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvzvg\" (UniqueName: \"kubernetes.io/projected/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-kube-api-access-zvzvg\") on node \"crc\" DevicePath \"\"" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.774509 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/839cc0a2-d1c3-46ee-89ac-18a96b92fee3-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.776690 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.776696 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-config-data\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.776777 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.776876 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.777498 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" 
(UniqueName: \"kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-server-conf\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.778695 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.778894 5014 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.778933 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-de504b32-6a0f-469c-acf1-4778995e167d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de504b32-6a0f-469c-acf1-4778995e167d\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b3f573573d4185f181f377a5e7547ecfec13579b863acd5df80498214dc179e0/globalmount\"" pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.780082 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.780170 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d6fe3a81-97ea-4544-8747-edfe60d5ba74-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.781424 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d6fe3a81-97ea-4544-8747-edfe60d5ba74-pod-info\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.792426 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 22:52:27 crc kubenswrapper[5014]: E1006 22:52:27.792922 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f26e5875-7ddb-4406-a0c4-9f8aa0b810f4" containerName="init" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.792950 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="f26e5875-7ddb-4406-a0c4-9f8aa0b810f4" containerName="init" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.793186 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="f26e5875-7ddb-4406-a0c4-9f8aa0b810f4" containerName="init" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.794366 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.796797 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.797135 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-v28mf" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.797146 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.797261 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.797591 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.799765 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.800023 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.807695 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbrhz\" (UniqueName: \"kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-kube-api-access-tbrhz\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.816839 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.846016 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-de504b32-6a0f-469c-acf1-4778995e167d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de504b32-6a0f-469c-acf1-4778995e167d\") pod \"rabbitmq-server-0\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.856781 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.977359 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-feeffabf-0180-4772-a39d-d981e979241d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-feeffabf-0180-4772-a39d-d981e979241d\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.977435 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.977469 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f8c2c614-a797-4f9f-94ed-40928e80fabb-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.977498 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.977530 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f8c2c614-a797-4f9f-94ed-40928e80fabb-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.977566 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.977598 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.977697 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.978026 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.978184 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:27 crc kubenswrapper[5014]: I1006 22:52:27.978355 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68dzr\" (UniqueName: \"kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-kube-api-access-68dzr\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.079295 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.079412 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-68dzr\" (UniqueName: \"kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-kube-api-access-68dzr\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.080128 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-feeffabf-0180-4772-a39d-d981e979241d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-feeffabf-0180-4772-a39d-d981e979241d\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.082248 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.082304 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f8c2c614-a797-4f9f-94ed-40928e80fabb-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.082329 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.082461 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/f8c2c614-a797-4f9f-94ed-40928e80fabb-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.083013 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.083319 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.083615 5014 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.083837 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-feeffabf-0180-4772-a39d-d981e979241d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-feeffabf-0180-4772-a39d-d981e979241d\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/d8fdada76f3df58e0c1d0071a9e2c1d2e798bdf0cb0b90a59d90c980b534487e/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.083662 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.084445 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.084467 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.084527 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.084606 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-plugins-conf\") pod \"rabbitmq-cell1-server-0\" 
(UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.086026 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.087200 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.089078 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.089080 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f8c2c614-a797-4f9f-94ed-40928e80fabb-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.090492 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.091900 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f8c2c614-a797-4f9f-94ed-40928e80fabb-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.094906 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-68dzr\" (UniqueName: \"kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-kube-api-access-68dzr\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.111567 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-feeffabf-0180-4772-a39d-d981e979241d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-feeffabf-0180-4772-a39d-d981e979241d\") pod \"rabbitmq-cell1-server-0\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.143883 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.149568 5014 generic.go:334] "Generic (PLEG): container finished" podID="544b38ec-006e-4f9a-8ce4-dbe28d3df2e3" containerID="503b94d1c190b13a8d7f8a859977a8ffbf18c470372bca3564b86054f1f21d1b" exitCode=0 Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.149654 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" event={"ID":"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3","Type":"ContainerDied","Data":"503b94d1c190b13a8d7f8a859977a8ffbf18c470372bca3564b86054f1f21d1b"} Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.149692 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" event={"ID":"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3","Type":"ContainerStarted","Data":"26b8b1bb687975528c0573dadf3a896c667264527fff01042952eae90cc71d46"} Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.151707 5014 generic.go:334] "Generic (PLEG): container finished" podID="f26e5875-7ddb-4406-a0c4-9f8aa0b810f4" containerID="155d4e8fb6cf8bfec9b90df39a2ad409363e1285e1eb7959a43736526d174db2" exitCode=0 Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.151769 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-678578b8df-5br5p" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.151781 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-678578b8df-5br5p" event={"ID":"f26e5875-7ddb-4406-a0c4-9f8aa0b810f4","Type":"ContainerDied","Data":"155d4e8fb6cf8bfec9b90df39a2ad409363e1285e1eb7959a43736526d174db2"} Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.151807 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-678578b8df-5br5p" event={"ID":"f26e5875-7ddb-4406-a0c4-9f8aa0b810f4","Type":"ContainerDied","Data":"9d03e19299385c4efddba8e093cb16ae8e13e718f9565e77e97ef4b96eae79ce"} Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.151826 5014 scope.go:117] "RemoveContainer" containerID="155d4e8fb6cf8bfec9b90df39a2ad409363e1285e1eb7959a43736526d174db2" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.154382 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.154399 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8f87f5c5-nrdbt" event={"ID":"839cc0a2-d1c3-46ee-89ac-18a96b92fee3","Type":"ContainerDied","Data":"8f4dfd0caa61d773c269b8d2349867412d78c5f8ee282c292c3096a46bc7cca2"} Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.156494 5014 generic.go:334] "Generic (PLEG): container finished" podID="bdd26dc8-c018-4079-9038-ca724382fc8f" containerID="f60e074e49b4de4096e4b43cd27f086e38957c24a5daaa3cf51255f305a9e63a" exitCode=0 Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.156534 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" event={"ID":"bdd26dc8-c018-4079-9038-ca724382fc8f","Type":"ContainerDied","Data":"f60e074e49b4de4096e4b43cd27f086e38957c24a5daaa3cf51255f305a9e63a"} Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.156561 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" event={"ID":"bdd26dc8-c018-4079-9038-ca724382fc8f","Type":"ContainerStarted","Data":"23baef8d4309a62c8f0773566eeb02da086f8c75d07af31b9d34389bae9cc172"} Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.173308 5014 scope.go:117] "RemoveContainer" containerID="155d4e8fb6cf8bfec9b90df39a2ad409363e1285e1eb7959a43736526d174db2" Oct 06 22:52:28 crc kubenswrapper[5014]: E1006 22:52:28.173782 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"155d4e8fb6cf8bfec9b90df39a2ad409363e1285e1eb7959a43736526d174db2\": container with ID starting with 155d4e8fb6cf8bfec9b90df39a2ad409363e1285e1eb7959a43736526d174db2 not found: ID does not exist" containerID="155d4e8fb6cf8bfec9b90df39a2ad409363e1285e1eb7959a43736526d174db2" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.173828 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"155d4e8fb6cf8bfec9b90df39a2ad409363e1285e1eb7959a43736526d174db2"} err="failed to get container status \"155d4e8fb6cf8bfec9b90df39a2ad409363e1285e1eb7959a43736526d174db2\": rpc error: code = NotFound desc = could not find container \"155d4e8fb6cf8bfec9b90df39a2ad409363e1285e1eb7959a43736526d174db2\": container with ID starting with 155d4e8fb6cf8bfec9b90df39a2ad409363e1285e1eb7959a43736526d174db2 not found: ID does not exist" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.173866 5014 scope.go:117] "RemoveContainer" containerID="98700e264b7f5ca01349824c72895f153e03d6971c1b0f9fa1d269e01d3b1dbe" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.289259 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.332221 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b8f87f5c5-nrdbt"] Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.341383 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b8f87f5c5-nrdbt"] Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.437768 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-678578b8df-5br5p"] Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.441497 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-678578b8df-5br5p"] Oct 06 22:52:28 crc kubenswrapper[5014]: E1006 22:52:28.484597 
5014 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Oct 06 22:52:28 crc kubenswrapper[5014]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Oct 06 22:52:28 crc kubenswrapper[5014]: > podSandboxID="26b8b1bb687975528c0573dadf3a896c667264527fff01042952eae90cc71d46" Oct 06 22:52:28 crc kubenswrapper[5014]: E1006 22:52:28.484778 5014 kuberuntime_manager.go:1274] "Unhandled Error" err=< Oct 06 22:52:28 crc kubenswrapper[5014]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n8chc6h5bh56fh546hb7hc8h67h5bchffh577h697h5b5h5bdh59bhf6hf4h558hb5h578h595h5cchfbh644h59ch7fh654h547h587h5cbh5d5h8fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7wpc6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-85f98b87f9-htpmq_openstack(544b38ec-006e-4f9a-8ce4-dbe28d3df2e3): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or 
directory Oct 06 22:52:28 crc kubenswrapper[5014]: > logger="UnhandledError" Oct 06 22:52:28 crc kubenswrapper[5014]: E1006 22:52:28.486060 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" podUID="544b38ec-006e-4f9a-8ce4-dbe28d3df2e3" Oct 06 22:52:28 crc kubenswrapper[5014]: I1006 22:52:28.631876 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 22:52:28 crc kubenswrapper[5014]: W1006 22:52:28.636383 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf8c2c614_a797_4f9f_94ed_40928e80fabb.slice/crio-af1021d1652c761cd4501206f46a8386593b81e3dd3b1538ffd11764b1f025d4 WatchSource:0}: Error finding container af1021d1652c761cd4501206f46a8386593b81e3dd3b1538ffd11764b1f025d4: Status 404 returned error can't find the container with id af1021d1652c761cd4501206f46a8386593b81e3dd3b1538ffd11764b1f025d4 Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.164140 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f8c2c614-a797-4f9f-94ed-40928e80fabb","Type":"ContainerStarted","Data":"af1021d1652c761cd4501206f46a8386593b81e3dd3b1538ffd11764b1f025d4"} Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.166978 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d6fe3a81-97ea-4544-8747-edfe60d5ba74","Type":"ContainerStarted","Data":"1893ac85db2049b821abf2dbd297be005adf889075518d1c5b302f2823c02a71"} Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.170171 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" event={"ID":"bdd26dc8-c018-4079-9038-ca724382fc8f","Type":"ContainerStarted","Data":"e02f8ffc6d119061a9c1bc58ac14197924b401566d3ac21999e06fe6640f3553"} Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.183960 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" podStartSLOduration=3.183939899 podStartE2EDuration="3.183939899s" podCreationTimestamp="2025-10-06 22:52:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:52:29.183164285 +0000 UTC m=+4894.476201019" watchObservedRunningTime="2025-10-06 22:52:29.183939899 +0000 UTC m=+4894.476976623" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.497124 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="839cc0a2-d1c3-46ee-89ac-18a96b92fee3" path="/var/lib/kubelet/pods/839cc0a2-d1c3-46ee-89ac-18a96b92fee3/volumes" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.497783 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f26e5875-7ddb-4406-a0c4-9f8aa0b810f4" path="/var/lib/kubelet/pods/f26e5875-7ddb-4406-a0c4-9f8aa0b810f4/volumes" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.611839 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.616969 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.619928 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.624362 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-9sg8l" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.624602 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.624958 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.625298 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.625463 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.629213 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.710389 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfmb7\" (UniqueName: \"kubernetes.io/projected/50d8273d-66b9-40f1-8d29-23052463812d-kube-api-access-mfmb7\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.710430 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/50d8273d-66b9-40f1-8d29-23052463812d-config-data-generated\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.710453 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50d8273d-66b9-40f1-8d29-23052463812d-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.710504 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/50d8273d-66b9-40f1-8d29-23052463812d-secrets\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.710530 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50d8273d-66b9-40f1-8d29-23052463812d-operator-scripts\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.710787 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-5508ac9d-acf6-45ed-9b6e-77102c40c192\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5508ac9d-acf6-45ed-9b6e-77102c40c192\") pod \"openstack-galera-0\" (UID: 
\"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.710849 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/50d8273d-66b9-40f1-8d29-23052463812d-config-data-default\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.710913 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/50d8273d-66b9-40f1-8d29-23052463812d-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.710996 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/50d8273d-66b9-40f1-8d29-23052463812d-kolla-config\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.812595 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-5508ac9d-acf6-45ed-9b6e-77102c40c192\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5508ac9d-acf6-45ed-9b6e-77102c40c192\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.812646 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/50d8273d-66b9-40f1-8d29-23052463812d-config-data-default\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.812672 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/50d8273d-66b9-40f1-8d29-23052463812d-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.812699 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/50d8273d-66b9-40f1-8d29-23052463812d-kolla-config\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.812746 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfmb7\" (UniqueName: \"kubernetes.io/projected/50d8273d-66b9-40f1-8d29-23052463812d-kube-api-access-mfmb7\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.812772 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/50d8273d-66b9-40f1-8d29-23052463812d-config-data-generated\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 
22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.812793 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50d8273d-66b9-40f1-8d29-23052463812d-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.812850 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/50d8273d-66b9-40f1-8d29-23052463812d-secrets\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.812883 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50d8273d-66b9-40f1-8d29-23052463812d-operator-scripts\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.813762 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/50d8273d-66b9-40f1-8d29-23052463812d-kolla-config\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.813891 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/50d8273d-66b9-40f1-8d29-23052463812d-config-data-generated\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.814432 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50d8273d-66b9-40f1-8d29-23052463812d-operator-scripts\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.815273 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/50d8273d-66b9-40f1-8d29-23052463812d-config-data-default\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.817224 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/50d8273d-66b9-40f1-8d29-23052463812d-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.817229 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50d8273d-66b9-40f1-8d29-23052463812d-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.817712 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/50d8273d-66b9-40f1-8d29-23052463812d-secrets\") pod \"openstack-galera-0\" (UID: 
\"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.817901 5014 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.817969 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-5508ac9d-acf6-45ed-9b6e-77102c40c192\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5508ac9d-acf6-45ed-9b6e-77102c40c192\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/75b3d5b905a4aeb875615f606c1e9ba514783e5f2d7ab27e0ffc4326b34e9838/globalmount\"" pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.834394 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfmb7\" (UniqueName: \"kubernetes.io/projected/50d8273d-66b9-40f1-8d29-23052463812d-kube-api-access-mfmb7\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.855475 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-5508ac9d-acf6-45ed-9b6e-77102c40c192\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5508ac9d-acf6-45ed-9b6e-77102c40c192\") pod \"openstack-galera-0\" (UID: \"50d8273d-66b9-40f1-8d29-23052463812d\") " pod="openstack/openstack-galera-0" Oct 06 22:52:29 crc kubenswrapper[5014]: I1006 22:52:29.946179 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.036592 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.038290 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.043744 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.043814 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.043742 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-84m86" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.049203 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.064414 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.116153 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/5d2f95cf-210c-459c-a307-ce2397afb314-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.116199 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2f95cf-210c-459c-a307-ce2397afb314-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.116356 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-13b6a050-138e-41f1-ad10-73029ba158c2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-13b6a050-138e-41f1-ad10-73029ba158c2\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.116417 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d2f95cf-210c-459c-a307-ce2397afb314-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.116442 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/5d2f95cf-210c-459c-a307-ce2397afb314-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.116464 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/5d2f95cf-210c-459c-a307-ce2397afb314-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.116504 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-ff88k\" (UniqueName: \"kubernetes.io/projected/5d2f95cf-210c-459c-a307-ce2397afb314-kube-api-access-ff88k\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.116525 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d2f95cf-210c-459c-a307-ce2397afb314-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.116548 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/5d2f95cf-210c-459c-a307-ce2397afb314-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.180571 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f8c2c614-a797-4f9f-94ed-40928e80fabb","Type":"ContainerStarted","Data":"c1b0b7250b2aca3d4010e3a0efe30d3b3c2529f64db97756e81e2a31a4352e3a"} Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.182815 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d6fe3a81-97ea-4544-8747-edfe60d5ba74","Type":"ContainerStarted","Data":"7a3b275003c1e80a1658f457fb1decda90991bd3001dde391bd0e36bba6c9f41"} Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.188200 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" event={"ID":"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3","Type":"ContainerStarted","Data":"a3a689ba0b0bce324a7d83f27761d40c2e0dad4ba3c9e678fac40b94974f3ead"} Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.188908 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.188926 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.219386 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d2f95cf-210c-459c-a307-ce2397afb314-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.219438 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/5d2f95cf-210c-459c-a307-ce2397afb314-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.219469 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/5d2f95cf-210c-459c-a307-ce2397afb314-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.219508 5014 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ff88k\" (UniqueName: \"kubernetes.io/projected/5d2f95cf-210c-459c-a307-ce2397afb314-kube-api-access-ff88k\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.219539 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d2f95cf-210c-459c-a307-ce2397afb314-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.219590 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/5d2f95cf-210c-459c-a307-ce2397afb314-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.219652 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/5d2f95cf-210c-459c-a307-ce2397afb314-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.219682 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2f95cf-210c-459c-a307-ce2397afb314-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.219910 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-13b6a050-138e-41f1-ad10-73029ba158c2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-13b6a050-138e-41f1-ad10-73029ba158c2\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.220488 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/5d2f95cf-210c-459c-a307-ce2397afb314-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.221294 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/5d2f95cf-210c-459c-a307-ce2397afb314-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.221370 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d2f95cf-210c-459c-a307-ce2397afb314-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.221506 5014 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/5d2f95cf-210c-459c-a307-ce2397afb314-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.224610 5014 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.224685 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-13b6a050-138e-41f1-ad10-73029ba158c2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-13b6a050-138e-41f1-ad10-73029ba158c2\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0d15fbded5543f1827858b65abc8a4afb93fe754bf8bff202556cb242c2df38a/globalmount\"" pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.225545 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/5d2f95cf-210c-459c-a307-ce2397afb314-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.235941 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2f95cf-210c-459c-a307-ce2397afb314-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.237881 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" podStartSLOduration=4.237867296 podStartE2EDuration="4.237867296s" podCreationTimestamp="2025-10-06 22:52:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:52:30.234340506 +0000 UTC m=+4895.527377250" watchObservedRunningTime="2025-10-06 22:52:30.237867296 +0000 UTC m=+4895.530904030" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.240953 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ff88k\" (UniqueName: \"kubernetes.io/projected/5d2f95cf-210c-459c-a307-ce2397afb314-kube-api-access-ff88k\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.254229 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d2f95cf-210c-459c-a307-ce2397afb314-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.300779 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-13b6a050-138e-41f1-ad10-73029ba158c2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-13b6a050-138e-41f1-ad10-73029ba158c2\") pod \"openstack-cell1-galera-0\" (UID: \"5d2f95cf-210c-459c-a307-ce2397afb314\") " pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 
crc kubenswrapper[5014]: I1006 22:52:30.363994 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.465122 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.687290 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.688845 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.690370 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.690839 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-zncvl" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.690971 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.694572 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.728515 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8557459e-5fc3-4d04-8827-8e8924429c15-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8557459e-5fc3-4d04-8827-8e8924429c15\") " pod="openstack/memcached-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.728570 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8557459e-5fc3-4d04-8827-8e8924429c15-config-data\") pod \"memcached-0\" (UID: \"8557459e-5fc3-4d04-8827-8e8924429c15\") " pod="openstack/memcached-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.728661 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8557459e-5fc3-4d04-8827-8e8924429c15-memcached-tls-certs\") pod \"memcached-0\" (UID: \"8557459e-5fc3-4d04-8827-8e8924429c15\") " pod="openstack/memcached-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.728690 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xrpt\" (UniqueName: \"kubernetes.io/projected/8557459e-5fc3-4d04-8827-8e8924429c15-kube-api-access-8xrpt\") pod \"memcached-0\" (UID: \"8557459e-5fc3-4d04-8827-8e8924429c15\") " pod="openstack/memcached-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.728708 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8557459e-5fc3-4d04-8827-8e8924429c15-kolla-config\") pod \"memcached-0\" (UID: \"8557459e-5fc3-4d04-8827-8e8924429c15\") " pod="openstack/memcached-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.811384 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.829812 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xrpt\" (UniqueName: 
\"kubernetes.io/projected/8557459e-5fc3-4d04-8827-8e8924429c15-kube-api-access-8xrpt\") pod \"memcached-0\" (UID: \"8557459e-5fc3-4d04-8827-8e8924429c15\") " pod="openstack/memcached-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.829881 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8557459e-5fc3-4d04-8827-8e8924429c15-kolla-config\") pod \"memcached-0\" (UID: \"8557459e-5fc3-4d04-8827-8e8924429c15\") " pod="openstack/memcached-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.829948 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8557459e-5fc3-4d04-8827-8e8924429c15-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8557459e-5fc3-4d04-8827-8e8924429c15\") " pod="openstack/memcached-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.829995 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8557459e-5fc3-4d04-8827-8e8924429c15-config-data\") pod \"memcached-0\" (UID: \"8557459e-5fc3-4d04-8827-8e8924429c15\") " pod="openstack/memcached-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.830099 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8557459e-5fc3-4d04-8827-8e8924429c15-memcached-tls-certs\") pod \"memcached-0\" (UID: \"8557459e-5fc3-4d04-8827-8e8924429c15\") " pod="openstack/memcached-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.831021 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8557459e-5fc3-4d04-8827-8e8924429c15-kolla-config\") pod \"memcached-0\" (UID: \"8557459e-5fc3-4d04-8827-8e8924429c15\") " pod="openstack/memcached-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.831076 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8557459e-5fc3-4d04-8827-8e8924429c15-config-data\") pod \"memcached-0\" (UID: \"8557459e-5fc3-4d04-8827-8e8924429c15\") " pod="openstack/memcached-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.835553 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8557459e-5fc3-4d04-8827-8e8924429c15-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8557459e-5fc3-4d04-8827-8e8924429c15\") " pod="openstack/memcached-0" Oct 06 22:52:30 crc kubenswrapper[5014]: I1006 22:52:30.836698 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8557459e-5fc3-4d04-8827-8e8924429c15-memcached-tls-certs\") pod \"memcached-0\" (UID: \"8557459e-5fc3-4d04-8827-8e8924429c15\") " pod="openstack/memcached-0" Oct 06 22:52:30 crc kubenswrapper[5014]: W1006 22:52:30.839321 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5d2f95cf_210c_459c_a307_ce2397afb314.slice/crio-bb56b3f81ae06a25f4176b797596b30bc1dbfce57087be91fa69e1b351201da9 WatchSource:0}: Error finding container bb56b3f81ae06a25f4176b797596b30bc1dbfce57087be91fa69e1b351201da9: Status 404 returned error can't find the container with id bb56b3f81ae06a25f4176b797596b30bc1dbfce57087be91fa69e1b351201da9 Oct 06 22:52:30 
crc kubenswrapper[5014]: I1006 22:52:30.847186 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xrpt\" (UniqueName: \"kubernetes.io/projected/8557459e-5fc3-4d04-8827-8e8924429c15-kube-api-access-8xrpt\") pod \"memcached-0\" (UID: \"8557459e-5fc3-4d04-8827-8e8924429c15\") " pod="openstack/memcached-0" Oct 06 22:52:31 crc kubenswrapper[5014]: I1006 22:52:31.005318 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Oct 06 22:52:31 crc kubenswrapper[5014]: I1006 22:52:31.196775 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"5d2f95cf-210c-459c-a307-ce2397afb314","Type":"ContainerStarted","Data":"2a652c1af7587608abf3739984f9882f52c89c5cae8fe5b2bfcd4e0a4d11b548"} Oct 06 22:52:31 crc kubenswrapper[5014]: I1006 22:52:31.196823 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"5d2f95cf-210c-459c-a307-ce2397afb314","Type":"ContainerStarted","Data":"bb56b3f81ae06a25f4176b797596b30bc1dbfce57087be91fa69e1b351201da9"} Oct 06 22:52:31 crc kubenswrapper[5014]: I1006 22:52:31.199663 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"50d8273d-66b9-40f1-8d29-23052463812d","Type":"ContainerStarted","Data":"cc57f767764d743c3296808682f33981cc7b7e15b2c7aacfd910ed83960d39f0"} Oct 06 22:52:31 crc kubenswrapper[5014]: I1006 22:52:31.199701 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"50d8273d-66b9-40f1-8d29-23052463812d","Type":"ContainerStarted","Data":"0f361800b588c2da8e12e5ef9dbbf927a933b0611cf6bcd4df951fcb8361bd1e"} Oct 06 22:52:31 crc kubenswrapper[5014]: I1006 22:52:31.444367 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Oct 06 22:52:31 crc kubenswrapper[5014]: W1006 22:52:31.453761 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8557459e_5fc3_4d04_8827_8e8924429c15.slice/crio-2e6eaf73901683448ac6ecaae4382c137df4bb1cc8599cdca8d7f9ed76a52ccc WatchSource:0}: Error finding container 2e6eaf73901683448ac6ecaae4382c137df4bb1cc8599cdca8d7f9ed76a52ccc: Status 404 returned error can't find the container with id 2e6eaf73901683448ac6ecaae4382c137df4bb1cc8599cdca8d7f9ed76a52ccc Oct 06 22:52:32 crc kubenswrapper[5014]: I1006 22:52:32.208125 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"8557459e-5fc3-4d04-8827-8e8924429c15","Type":"ContainerStarted","Data":"be9c474492ffbb272041cf4e1bf3dbb10fb230200c102cba2096b90a9aa2b979"} Oct 06 22:52:32 crc kubenswrapper[5014]: I1006 22:52:32.208532 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"8557459e-5fc3-4d04-8827-8e8924429c15","Type":"ContainerStarted","Data":"2e6eaf73901683448ac6ecaae4382c137df4bb1cc8599cdca8d7f9ed76a52ccc"} Oct 06 22:52:32 crc kubenswrapper[5014]: I1006 22:52:32.236163 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=2.2361396 podStartE2EDuration="2.2361396s" podCreationTimestamp="2025-10-06 22:52:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:52:32.22753255 +0000 UTC m=+4897.520569294" watchObservedRunningTime="2025-10-06 22:52:32.2361396 +0000 UTC 
m=+4897.529176344" Oct 06 22:52:33 crc kubenswrapper[5014]: I1006 22:52:33.218070 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Oct 06 22:52:34 crc kubenswrapper[5014]: I1006 22:52:34.227512 5014 generic.go:334] "Generic (PLEG): container finished" podID="50d8273d-66b9-40f1-8d29-23052463812d" containerID="cc57f767764d743c3296808682f33981cc7b7e15b2c7aacfd910ed83960d39f0" exitCode=0 Oct 06 22:52:34 crc kubenswrapper[5014]: I1006 22:52:34.227587 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"50d8273d-66b9-40f1-8d29-23052463812d","Type":"ContainerDied","Data":"cc57f767764d743c3296808682f33981cc7b7e15b2c7aacfd910ed83960d39f0"} Oct 06 22:52:35 crc kubenswrapper[5014]: I1006 22:52:35.240293 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"50d8273d-66b9-40f1-8d29-23052463812d","Type":"ContainerStarted","Data":"fa3ff01e10bc8609d454a43cc3e4e94860d2ebdd56950d7ca1a5315149ad028b"} Oct 06 22:52:35 crc kubenswrapper[5014]: I1006 22:52:35.243836 5014 generic.go:334] "Generic (PLEG): container finished" podID="5d2f95cf-210c-459c-a307-ce2397afb314" containerID="2a652c1af7587608abf3739984f9882f52c89c5cae8fe5b2bfcd4e0a4d11b548" exitCode=0 Oct 06 22:52:35 crc kubenswrapper[5014]: I1006 22:52:35.243889 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"5d2f95cf-210c-459c-a307-ce2397afb314","Type":"ContainerDied","Data":"2a652c1af7587608abf3739984f9882f52c89c5cae8fe5b2bfcd4e0a4d11b548"} Oct 06 22:52:35 crc kubenswrapper[5014]: I1006 22:52:35.289255 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=7.289220119 podStartE2EDuration="7.289220119s" podCreationTimestamp="2025-10-06 22:52:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:52:35.280112443 +0000 UTC m=+4900.573149187" watchObservedRunningTime="2025-10-06 22:52:35.289220119 +0000 UTC m=+4900.582256883" Oct 06 22:52:36 crc kubenswrapper[5014]: I1006 22:52:36.006701 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Oct 06 22:52:36 crc kubenswrapper[5014]: I1006 22:52:36.253957 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"5d2f95cf-210c-459c-a307-ce2397afb314","Type":"ContainerStarted","Data":"59e0c6bb97896cbee77a26a4117c1093db10a0f56601c2909d4f06ed9746df54"} Oct 06 22:52:36 crc kubenswrapper[5014]: I1006 22:52:36.279045 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=7.279018308 podStartE2EDuration="7.279018308s" podCreationTimestamp="2025-10-06 22:52:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:52:36.276937193 +0000 UTC m=+4901.569973937" watchObservedRunningTime="2025-10-06 22:52:36.279018308 +0000 UTC m=+4901.572055082" Oct 06 22:52:36 crc kubenswrapper[5014]: I1006 22:52:36.695761 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" Oct 06 22:52:37 crc kubenswrapper[5014]: I1006 22:52:37.002936 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" Oct 06 22:52:37 crc kubenswrapper[5014]: I1006 22:52:37.052264 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85f98b87f9-htpmq"] Oct 06 22:52:37 crc kubenswrapper[5014]: I1006 22:52:37.261861 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" podUID="544b38ec-006e-4f9a-8ce4-dbe28d3df2e3" containerName="dnsmasq-dns" containerID="cri-o://a3a689ba0b0bce324a7d83f27761d40c2e0dad4ba3c9e678fac40b94974f3ead" gracePeriod=10 Oct 06 22:52:37 crc kubenswrapper[5014]: I1006 22:52:37.699987 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" Oct 06 22:52:37 crc kubenswrapper[5014]: I1006 22:52:37.846330 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-dns-svc\") pod \"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3\" (UID: \"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3\") " Oct 06 22:52:37 crc kubenswrapper[5014]: I1006 22:52:37.846464 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-config\") pod \"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3\" (UID: \"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3\") " Oct 06 22:52:37 crc kubenswrapper[5014]: I1006 22:52:37.846556 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7wpc6\" (UniqueName: \"kubernetes.io/projected/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-kube-api-access-7wpc6\") pod \"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3\" (UID: \"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3\") " Oct 06 22:52:37 crc kubenswrapper[5014]: I1006 22:52:37.855434 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-kube-api-access-7wpc6" (OuterVolumeSpecName: "kube-api-access-7wpc6") pod "544b38ec-006e-4f9a-8ce4-dbe28d3df2e3" (UID: "544b38ec-006e-4f9a-8ce4-dbe28d3df2e3"). InnerVolumeSpecName "kube-api-access-7wpc6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:52:37 crc kubenswrapper[5014]: I1006 22:52:37.896547 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-config" (OuterVolumeSpecName: "config") pod "544b38ec-006e-4f9a-8ce4-dbe28d3df2e3" (UID: "544b38ec-006e-4f9a-8ce4-dbe28d3df2e3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:52:37 crc kubenswrapper[5014]: I1006 22:52:37.900279 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "544b38ec-006e-4f9a-8ce4-dbe28d3df2e3" (UID: "544b38ec-006e-4f9a-8ce4-dbe28d3df2e3"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:52:37 crc kubenswrapper[5014]: I1006 22:52:37.948987 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7wpc6\" (UniqueName: \"kubernetes.io/projected/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-kube-api-access-7wpc6\") on node \"crc\" DevicePath \"\"" Oct 06 22:52:37 crc kubenswrapper[5014]: I1006 22:52:37.949047 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 22:52:37 crc kubenswrapper[5014]: I1006 22:52:37.949072 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3-config\") on node \"crc\" DevicePath \"\"" Oct 06 22:52:38 crc kubenswrapper[5014]: I1006 22:52:38.272420 5014 generic.go:334] "Generic (PLEG): container finished" podID="544b38ec-006e-4f9a-8ce4-dbe28d3df2e3" containerID="a3a689ba0b0bce324a7d83f27761d40c2e0dad4ba3c9e678fac40b94974f3ead" exitCode=0 Oct 06 22:52:38 crc kubenswrapper[5014]: I1006 22:52:38.272477 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" Oct 06 22:52:38 crc kubenswrapper[5014]: I1006 22:52:38.272479 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" event={"ID":"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3","Type":"ContainerDied","Data":"a3a689ba0b0bce324a7d83f27761d40c2e0dad4ba3c9e678fac40b94974f3ead"} Oct 06 22:52:38 crc kubenswrapper[5014]: I1006 22:52:38.272666 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85f98b87f9-htpmq" event={"ID":"544b38ec-006e-4f9a-8ce4-dbe28d3df2e3","Type":"ContainerDied","Data":"26b8b1bb687975528c0573dadf3a896c667264527fff01042952eae90cc71d46"} Oct 06 22:52:38 crc kubenswrapper[5014]: I1006 22:52:38.272697 5014 scope.go:117] "RemoveContainer" containerID="a3a689ba0b0bce324a7d83f27761d40c2e0dad4ba3c9e678fac40b94974f3ead" Oct 06 22:52:38 crc kubenswrapper[5014]: I1006 22:52:38.297815 5014 scope.go:117] "RemoveContainer" containerID="503b94d1c190b13a8d7f8a859977a8ffbf18c470372bca3564b86054f1f21d1b" Oct 06 22:52:38 crc kubenswrapper[5014]: I1006 22:52:38.308706 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85f98b87f9-htpmq"] Oct 06 22:52:38 crc kubenswrapper[5014]: I1006 22:52:38.315719 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-85f98b87f9-htpmq"] Oct 06 22:52:38 crc kubenswrapper[5014]: I1006 22:52:38.329530 5014 scope.go:117] "RemoveContainer" containerID="a3a689ba0b0bce324a7d83f27761d40c2e0dad4ba3c9e678fac40b94974f3ead" Oct 06 22:52:38 crc kubenswrapper[5014]: E1006 22:52:38.330082 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3a689ba0b0bce324a7d83f27761d40c2e0dad4ba3c9e678fac40b94974f3ead\": container with ID starting with a3a689ba0b0bce324a7d83f27761d40c2e0dad4ba3c9e678fac40b94974f3ead not found: ID does not exist" containerID="a3a689ba0b0bce324a7d83f27761d40c2e0dad4ba3c9e678fac40b94974f3ead" Oct 06 22:52:38 crc kubenswrapper[5014]: I1006 22:52:38.330167 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3a689ba0b0bce324a7d83f27761d40c2e0dad4ba3c9e678fac40b94974f3ead"} err="failed to get container status 
\"a3a689ba0b0bce324a7d83f27761d40c2e0dad4ba3c9e678fac40b94974f3ead\": rpc error: code = NotFound desc = could not find container \"a3a689ba0b0bce324a7d83f27761d40c2e0dad4ba3c9e678fac40b94974f3ead\": container with ID starting with a3a689ba0b0bce324a7d83f27761d40c2e0dad4ba3c9e678fac40b94974f3ead not found: ID does not exist" Oct 06 22:52:38 crc kubenswrapper[5014]: I1006 22:52:38.330207 5014 scope.go:117] "RemoveContainer" containerID="503b94d1c190b13a8d7f8a859977a8ffbf18c470372bca3564b86054f1f21d1b" Oct 06 22:52:38 crc kubenswrapper[5014]: E1006 22:52:38.330980 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"503b94d1c190b13a8d7f8a859977a8ffbf18c470372bca3564b86054f1f21d1b\": container with ID starting with 503b94d1c190b13a8d7f8a859977a8ffbf18c470372bca3564b86054f1f21d1b not found: ID does not exist" containerID="503b94d1c190b13a8d7f8a859977a8ffbf18c470372bca3564b86054f1f21d1b" Oct 06 22:52:38 crc kubenswrapper[5014]: I1006 22:52:38.331020 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"503b94d1c190b13a8d7f8a859977a8ffbf18c470372bca3564b86054f1f21d1b"} err="failed to get container status \"503b94d1c190b13a8d7f8a859977a8ffbf18c470372bca3564b86054f1f21d1b\": rpc error: code = NotFound desc = could not find container \"503b94d1c190b13a8d7f8a859977a8ffbf18c470372bca3564b86054f1f21d1b\": container with ID starting with 503b94d1c190b13a8d7f8a859977a8ffbf18c470372bca3564b86054f1f21d1b not found: ID does not exist" Oct 06 22:52:39 crc kubenswrapper[5014]: I1006 22:52:39.501530 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="544b38ec-006e-4f9a-8ce4-dbe28d3df2e3" path="/var/lib/kubelet/pods/544b38ec-006e-4f9a-8ce4-dbe28d3df2e3/volumes" Oct 06 22:52:39 crc kubenswrapper[5014]: I1006 22:52:39.946828 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Oct 06 22:52:39 crc kubenswrapper[5014]: I1006 22:52:39.946900 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Oct 06 22:52:40 crc kubenswrapper[5014]: I1006 22:52:40.364956 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:40 crc kubenswrapper[5014]: I1006 22:52:40.365043 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:42 crc kubenswrapper[5014]: I1006 22:52:42.040322 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Oct 06 22:52:42 crc kubenswrapper[5014]: I1006 22:52:42.128964 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Oct 06 22:52:42 crc kubenswrapper[5014]: I1006 22:52:42.518730 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:42 crc kubenswrapper[5014]: I1006 22:52:42.561523 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.250955 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pjtsq"] Oct 06 22:52:54 crc kubenswrapper[5014]: E1006 22:52:54.252351 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="544b38ec-006e-4f9a-8ce4-dbe28d3df2e3" 
containerName="dnsmasq-dns" Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.252383 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="544b38ec-006e-4f9a-8ce4-dbe28d3df2e3" containerName="dnsmasq-dns" Oct 06 22:52:54 crc kubenswrapper[5014]: E1006 22:52:54.252450 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="544b38ec-006e-4f9a-8ce4-dbe28d3df2e3" containerName="init" Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.252468 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="544b38ec-006e-4f9a-8ce4-dbe28d3df2e3" containerName="init" Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.252867 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="544b38ec-006e-4f9a-8ce4-dbe28d3df2e3" containerName="dnsmasq-dns" Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.255659 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.263669 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pjtsq"] Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.426500 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3eeaa18-06c9-41be-87c1-fa8e57198da0-utilities\") pod \"redhat-operators-pjtsq\" (UID: \"d3eeaa18-06c9-41be-87c1-fa8e57198da0\") " pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.426770 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3eeaa18-06c9-41be-87c1-fa8e57198da0-catalog-content\") pod \"redhat-operators-pjtsq\" (UID: \"d3eeaa18-06c9-41be-87c1-fa8e57198da0\") " pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.426859 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cppvd\" (UniqueName: \"kubernetes.io/projected/d3eeaa18-06c9-41be-87c1-fa8e57198da0-kube-api-access-cppvd\") pod \"redhat-operators-pjtsq\" (UID: \"d3eeaa18-06c9-41be-87c1-fa8e57198da0\") " pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.528395 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3eeaa18-06c9-41be-87c1-fa8e57198da0-catalog-content\") pod \"redhat-operators-pjtsq\" (UID: \"d3eeaa18-06c9-41be-87c1-fa8e57198da0\") " pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.528470 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cppvd\" (UniqueName: \"kubernetes.io/projected/d3eeaa18-06c9-41be-87c1-fa8e57198da0-kube-api-access-cppvd\") pod \"redhat-operators-pjtsq\" (UID: \"d3eeaa18-06c9-41be-87c1-fa8e57198da0\") " pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.528531 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3eeaa18-06c9-41be-87c1-fa8e57198da0-utilities\") pod \"redhat-operators-pjtsq\" (UID: \"d3eeaa18-06c9-41be-87c1-fa8e57198da0\") " pod="openshift-marketplace/redhat-operators-pjtsq" 
Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.529047 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3eeaa18-06c9-41be-87c1-fa8e57198da0-catalog-content\") pod \"redhat-operators-pjtsq\" (UID: \"d3eeaa18-06c9-41be-87c1-fa8e57198da0\") " pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.529307 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3eeaa18-06c9-41be-87c1-fa8e57198da0-utilities\") pod \"redhat-operators-pjtsq\" (UID: \"d3eeaa18-06c9-41be-87c1-fa8e57198da0\") " pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.548419 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cppvd\" (UniqueName: \"kubernetes.io/projected/d3eeaa18-06c9-41be-87c1-fa8e57198da0-kube-api-access-cppvd\") pod \"redhat-operators-pjtsq\" (UID: \"d3eeaa18-06c9-41be-87c1-fa8e57198da0\") " pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:52:54 crc kubenswrapper[5014]: I1006 22:52:54.604189 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:52:55 crc kubenswrapper[5014]: I1006 22:52:55.122225 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pjtsq"] Oct 06 22:52:55 crc kubenswrapper[5014]: I1006 22:52:55.464950 5014 generic.go:334] "Generic (PLEG): container finished" podID="d3eeaa18-06c9-41be-87c1-fa8e57198da0" containerID="f6cf48469dae90561a058176ffdca36d058a7af4a886af972df357e1a569dd3d" exitCode=0 Oct 06 22:52:55 crc kubenswrapper[5014]: I1006 22:52:55.464993 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjtsq" event={"ID":"d3eeaa18-06c9-41be-87c1-fa8e57198da0","Type":"ContainerDied","Data":"f6cf48469dae90561a058176ffdca36d058a7af4a886af972df357e1a569dd3d"} Oct 06 22:52:55 crc kubenswrapper[5014]: I1006 22:52:55.465020 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjtsq" event={"ID":"d3eeaa18-06c9-41be-87c1-fa8e57198da0","Type":"ContainerStarted","Data":"4570db9ea38417d61bbc106b8c0cd6fa791cba2cb17028b375a37ac6214ee40c"} Oct 06 22:52:56 crc kubenswrapper[5014]: I1006 22:52:56.476869 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjtsq" event={"ID":"d3eeaa18-06c9-41be-87c1-fa8e57198da0","Type":"ContainerStarted","Data":"df1251b87b855c1e2e2b66a3584ffac6a9b40c64fcaef53d2ef152bbc9e9ce3e"} Oct 06 22:52:57 crc kubenswrapper[5014]: I1006 22:52:57.487224 5014 generic.go:334] "Generic (PLEG): container finished" podID="d3eeaa18-06c9-41be-87c1-fa8e57198da0" containerID="df1251b87b855c1e2e2b66a3584ffac6a9b40c64fcaef53d2ef152bbc9e9ce3e" exitCode=0 Oct 06 22:52:57 crc kubenswrapper[5014]: I1006 22:52:57.504530 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjtsq" event={"ID":"d3eeaa18-06c9-41be-87c1-fa8e57198da0","Type":"ContainerDied","Data":"df1251b87b855c1e2e2b66a3584ffac6a9b40c64fcaef53d2ef152bbc9e9ce3e"} Oct 06 22:52:58 crc kubenswrapper[5014]: I1006 22:52:58.501797 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjtsq" 
event={"ID":"d3eeaa18-06c9-41be-87c1-fa8e57198da0","Type":"ContainerStarted","Data":"9c66e7b51113db6e63ddce7196d6c12a1a2fb7c497c444ca6d3d2cfd412e7547"} Oct 06 22:52:58 crc kubenswrapper[5014]: I1006 22:52:58.531043 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pjtsq" podStartSLOduration=1.930489767 podStartE2EDuration="4.531026531s" podCreationTimestamp="2025-10-06 22:52:54 +0000 UTC" firstStartedPulling="2025-10-06 22:52:55.466441023 +0000 UTC m=+4920.759477757" lastFinishedPulling="2025-10-06 22:52:58.066977757 +0000 UTC m=+4923.360014521" observedRunningTime="2025-10-06 22:52:58.530494415 +0000 UTC m=+4923.823531149" watchObservedRunningTime="2025-10-06 22:52:58.531026531 +0000 UTC m=+4923.824063265" Oct 06 22:53:02 crc kubenswrapper[5014]: I1006 22:53:02.537199 5014 generic.go:334] "Generic (PLEG): container finished" podID="f8c2c614-a797-4f9f-94ed-40928e80fabb" containerID="c1b0b7250b2aca3d4010e3a0efe30d3b3c2529f64db97756e81e2a31a4352e3a" exitCode=0 Oct 06 22:53:02 crc kubenswrapper[5014]: I1006 22:53:02.537332 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f8c2c614-a797-4f9f-94ed-40928e80fabb","Type":"ContainerDied","Data":"c1b0b7250b2aca3d4010e3a0efe30d3b3c2529f64db97756e81e2a31a4352e3a"} Oct 06 22:53:02 crc kubenswrapper[5014]: I1006 22:53:02.542198 5014 generic.go:334] "Generic (PLEG): container finished" podID="d6fe3a81-97ea-4544-8747-edfe60d5ba74" containerID="7a3b275003c1e80a1658f457fb1decda90991bd3001dde391bd0e36bba6c9f41" exitCode=0 Oct 06 22:53:02 crc kubenswrapper[5014]: I1006 22:53:02.542270 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d6fe3a81-97ea-4544-8747-edfe60d5ba74","Type":"ContainerDied","Data":"7a3b275003c1e80a1658f457fb1decda90991bd3001dde391bd0e36bba6c9f41"} Oct 06 22:53:03 crc kubenswrapper[5014]: I1006 22:53:03.554146 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f8c2c614-a797-4f9f-94ed-40928e80fabb","Type":"ContainerStarted","Data":"0ce38e851dc67bfd3d5b49c7de43672726ef0720ab7257fba31cd18c3b1b62cf"} Oct 06 22:53:03 crc kubenswrapper[5014]: I1006 22:53:03.554805 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:03 crc kubenswrapper[5014]: I1006 22:53:03.556824 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d6fe3a81-97ea-4544-8747-edfe60d5ba74","Type":"ContainerStarted","Data":"44cb63b4460f3a076d1e22995245a49db394840db32443a76bbbd69db91e9d1a"} Oct 06 22:53:03 crc kubenswrapper[5014]: I1006 22:53:03.557074 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Oct 06 22:53:03 crc kubenswrapper[5014]: I1006 22:53:03.588174 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.588153654 podStartE2EDuration="37.588153654s" podCreationTimestamp="2025-10-06 22:52:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:53:03.584527871 +0000 UTC m=+4928.877564655" watchObservedRunningTime="2025-10-06 22:53:03.588153654 +0000 UTC m=+4928.881190398" Oct 06 22:53:03 crc kubenswrapper[5014]: I1006 22:53:03.624971 5014 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.624934784 podStartE2EDuration="37.624934784s" podCreationTimestamp="2025-10-06 22:52:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:53:03.614958113 +0000 UTC m=+4928.907994877" watchObservedRunningTime="2025-10-06 22:53:03.624934784 +0000 UTC m=+4928.917971558" Oct 06 22:53:04 crc kubenswrapper[5014]: I1006 22:53:04.604409 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:53:04 crc kubenswrapper[5014]: I1006 22:53:04.604490 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:53:04 crc kubenswrapper[5014]: I1006 22:53:04.660426 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:53:05 crc kubenswrapper[5014]: I1006 22:53:05.658677 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:53:05 crc kubenswrapper[5014]: I1006 22:53:05.723702 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pjtsq"] Oct 06 22:53:07 crc kubenswrapper[5014]: I1006 22:53:07.600690 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pjtsq" podUID="d3eeaa18-06c9-41be-87c1-fa8e57198da0" containerName="registry-server" containerID="cri-o://9c66e7b51113db6e63ddce7196d6c12a1a2fb7c497c444ca6d3d2cfd412e7547" gracePeriod=2 Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.197766 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.287404 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cppvd\" (UniqueName: \"kubernetes.io/projected/d3eeaa18-06c9-41be-87c1-fa8e57198da0-kube-api-access-cppvd\") pod \"d3eeaa18-06c9-41be-87c1-fa8e57198da0\" (UID: \"d3eeaa18-06c9-41be-87c1-fa8e57198da0\") " Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.287663 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3eeaa18-06c9-41be-87c1-fa8e57198da0-utilities\") pod \"d3eeaa18-06c9-41be-87c1-fa8e57198da0\" (UID: \"d3eeaa18-06c9-41be-87c1-fa8e57198da0\") " Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.287694 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3eeaa18-06c9-41be-87c1-fa8e57198da0-catalog-content\") pod \"d3eeaa18-06c9-41be-87c1-fa8e57198da0\" (UID: \"d3eeaa18-06c9-41be-87c1-fa8e57198da0\") " Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.289117 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3eeaa18-06c9-41be-87c1-fa8e57198da0-utilities" (OuterVolumeSpecName: "utilities") pod "d3eeaa18-06c9-41be-87c1-fa8e57198da0" (UID: "d3eeaa18-06c9-41be-87c1-fa8e57198da0"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.299820 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3eeaa18-06c9-41be-87c1-fa8e57198da0-kube-api-access-cppvd" (OuterVolumeSpecName: "kube-api-access-cppvd") pod "d3eeaa18-06c9-41be-87c1-fa8e57198da0" (UID: "d3eeaa18-06c9-41be-87c1-fa8e57198da0"). InnerVolumeSpecName "kube-api-access-cppvd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.377412 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3eeaa18-06c9-41be-87c1-fa8e57198da0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d3eeaa18-06c9-41be-87c1-fa8e57198da0" (UID: "d3eeaa18-06c9-41be-87c1-fa8e57198da0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.389421 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3eeaa18-06c9-41be-87c1-fa8e57198da0-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.389472 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3eeaa18-06c9-41be-87c1-fa8e57198da0-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.389494 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cppvd\" (UniqueName: \"kubernetes.io/projected/d3eeaa18-06c9-41be-87c1-fa8e57198da0-kube-api-access-cppvd\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.614578 5014 generic.go:334] "Generic (PLEG): container finished" podID="d3eeaa18-06c9-41be-87c1-fa8e57198da0" containerID="9c66e7b51113db6e63ddce7196d6c12a1a2fb7c497c444ca6d3d2cfd412e7547" exitCode=0 Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.614679 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjtsq" event={"ID":"d3eeaa18-06c9-41be-87c1-fa8e57198da0","Type":"ContainerDied","Data":"9c66e7b51113db6e63ddce7196d6c12a1a2fb7c497c444ca6d3d2cfd412e7547"} Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.614897 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pjtsq" event={"ID":"d3eeaa18-06c9-41be-87c1-fa8e57198da0","Type":"ContainerDied","Data":"4570db9ea38417d61bbc106b8c0cd6fa791cba2cb17028b375a37ac6214ee40c"} Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.614751 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pjtsq" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.614943 5014 scope.go:117] "RemoveContainer" containerID="9c66e7b51113db6e63ddce7196d6c12a1a2fb7c497c444ca6d3d2cfd412e7547" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.665707 5014 scope.go:117] "RemoveContainer" containerID="df1251b87b855c1e2e2b66a3584ffac6a9b40c64fcaef53d2ef152bbc9e9ce3e" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.669357 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pjtsq"] Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.676656 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pjtsq"] Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.689183 5014 scope.go:117] "RemoveContainer" containerID="f6cf48469dae90561a058176ffdca36d058a7af4a886af972df357e1a569dd3d" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.727934 5014 scope.go:117] "RemoveContainer" containerID="9c66e7b51113db6e63ddce7196d6c12a1a2fb7c497c444ca6d3d2cfd412e7547" Oct 06 22:53:08 crc kubenswrapper[5014]: E1006 22:53:08.728365 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c66e7b51113db6e63ddce7196d6c12a1a2fb7c497c444ca6d3d2cfd412e7547\": container with ID starting with 9c66e7b51113db6e63ddce7196d6c12a1a2fb7c497c444ca6d3d2cfd412e7547 not found: ID does not exist" containerID="9c66e7b51113db6e63ddce7196d6c12a1a2fb7c497c444ca6d3d2cfd412e7547" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.728405 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c66e7b51113db6e63ddce7196d6c12a1a2fb7c497c444ca6d3d2cfd412e7547"} err="failed to get container status \"9c66e7b51113db6e63ddce7196d6c12a1a2fb7c497c444ca6d3d2cfd412e7547\": rpc error: code = NotFound desc = could not find container \"9c66e7b51113db6e63ddce7196d6c12a1a2fb7c497c444ca6d3d2cfd412e7547\": container with ID starting with 9c66e7b51113db6e63ddce7196d6c12a1a2fb7c497c444ca6d3d2cfd412e7547 not found: ID does not exist" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.728432 5014 scope.go:117] "RemoveContainer" containerID="df1251b87b855c1e2e2b66a3584ffac6a9b40c64fcaef53d2ef152bbc9e9ce3e" Oct 06 22:53:08 crc kubenswrapper[5014]: E1006 22:53:08.730569 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df1251b87b855c1e2e2b66a3584ffac6a9b40c64fcaef53d2ef152bbc9e9ce3e\": container with ID starting with df1251b87b855c1e2e2b66a3584ffac6a9b40c64fcaef53d2ef152bbc9e9ce3e not found: ID does not exist" containerID="df1251b87b855c1e2e2b66a3584ffac6a9b40c64fcaef53d2ef152bbc9e9ce3e" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.730663 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df1251b87b855c1e2e2b66a3584ffac6a9b40c64fcaef53d2ef152bbc9e9ce3e"} err="failed to get container status \"df1251b87b855c1e2e2b66a3584ffac6a9b40c64fcaef53d2ef152bbc9e9ce3e\": rpc error: code = NotFound desc = could not find container \"df1251b87b855c1e2e2b66a3584ffac6a9b40c64fcaef53d2ef152bbc9e9ce3e\": container with ID starting with df1251b87b855c1e2e2b66a3584ffac6a9b40c64fcaef53d2ef152bbc9e9ce3e not found: ID does not exist" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.730740 5014 scope.go:117] "RemoveContainer" 
containerID="f6cf48469dae90561a058176ffdca36d058a7af4a886af972df357e1a569dd3d" Oct 06 22:53:08 crc kubenswrapper[5014]: E1006 22:53:08.731041 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6cf48469dae90561a058176ffdca36d058a7af4a886af972df357e1a569dd3d\": container with ID starting with f6cf48469dae90561a058176ffdca36d058a7af4a886af972df357e1a569dd3d not found: ID does not exist" containerID="f6cf48469dae90561a058176ffdca36d058a7af4a886af972df357e1a569dd3d" Oct 06 22:53:08 crc kubenswrapper[5014]: I1006 22:53:08.731069 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6cf48469dae90561a058176ffdca36d058a7af4a886af972df357e1a569dd3d"} err="failed to get container status \"f6cf48469dae90561a058176ffdca36d058a7af4a886af972df357e1a569dd3d\": rpc error: code = NotFound desc = could not find container \"f6cf48469dae90561a058176ffdca36d058a7af4a886af972df357e1a569dd3d\": container with ID starting with f6cf48469dae90561a058176ffdca36d058a7af4a886af972df357e1a569dd3d not found: ID does not exist" Oct 06 22:53:09 crc kubenswrapper[5014]: I1006 22:53:09.495653 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3eeaa18-06c9-41be-87c1-fa8e57198da0" path="/var/lib/kubelet/pods/d3eeaa18-06c9-41be-87c1-fa8e57198da0/volumes" Oct 06 22:53:17 crc kubenswrapper[5014]: I1006 22:53:17.861893 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Oct 06 22:53:18 crc kubenswrapper[5014]: I1006 22:53:18.148879 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.239293 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5fdc957c47-knrqp"] Oct 06 22:53:23 crc kubenswrapper[5014]: E1006 22:53:23.243596 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3eeaa18-06c9-41be-87c1-fa8e57198da0" containerName="extract-content" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.243640 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3eeaa18-06c9-41be-87c1-fa8e57198da0" containerName="extract-content" Oct 06 22:53:23 crc kubenswrapper[5014]: E1006 22:53:23.243676 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3eeaa18-06c9-41be-87c1-fa8e57198da0" containerName="extract-utilities" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.243684 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3eeaa18-06c9-41be-87c1-fa8e57198da0" containerName="extract-utilities" Oct 06 22:53:23 crc kubenswrapper[5014]: E1006 22:53:23.243698 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3eeaa18-06c9-41be-87c1-fa8e57198da0" containerName="registry-server" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.243707 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3eeaa18-06c9-41be-87c1-fa8e57198da0" containerName="registry-server" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.243888 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3eeaa18-06c9-41be-87c1-fa8e57198da0" containerName="registry-server" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.244918 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.250281 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fdc957c47-knrqp"] Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.361921 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c45fd94-0ec2-4683-a66b-aef65f906ca5-config\") pod \"dnsmasq-dns-5fdc957c47-knrqp\" (UID: \"0c45fd94-0ec2-4683-a66b-aef65f906ca5\") " pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.362092 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qxrn\" (UniqueName: \"kubernetes.io/projected/0c45fd94-0ec2-4683-a66b-aef65f906ca5-kube-api-access-7qxrn\") pod \"dnsmasq-dns-5fdc957c47-knrqp\" (UID: \"0c45fd94-0ec2-4683-a66b-aef65f906ca5\") " pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.362250 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0c45fd94-0ec2-4683-a66b-aef65f906ca5-dns-svc\") pod \"dnsmasq-dns-5fdc957c47-knrqp\" (UID: \"0c45fd94-0ec2-4683-a66b-aef65f906ca5\") " pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.463481 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c45fd94-0ec2-4683-a66b-aef65f906ca5-config\") pod \"dnsmasq-dns-5fdc957c47-knrqp\" (UID: \"0c45fd94-0ec2-4683-a66b-aef65f906ca5\") " pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.463567 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qxrn\" (UniqueName: \"kubernetes.io/projected/0c45fd94-0ec2-4683-a66b-aef65f906ca5-kube-api-access-7qxrn\") pod \"dnsmasq-dns-5fdc957c47-knrqp\" (UID: \"0c45fd94-0ec2-4683-a66b-aef65f906ca5\") " pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.463654 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0c45fd94-0ec2-4683-a66b-aef65f906ca5-dns-svc\") pod \"dnsmasq-dns-5fdc957c47-knrqp\" (UID: \"0c45fd94-0ec2-4683-a66b-aef65f906ca5\") " pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.464839 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c45fd94-0ec2-4683-a66b-aef65f906ca5-config\") pod \"dnsmasq-dns-5fdc957c47-knrqp\" (UID: \"0c45fd94-0ec2-4683-a66b-aef65f906ca5\") " pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.464884 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0c45fd94-0ec2-4683-a66b-aef65f906ca5-dns-svc\") pod \"dnsmasq-dns-5fdc957c47-knrqp\" (UID: \"0c45fd94-0ec2-4683-a66b-aef65f906ca5\") " pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.489034 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qxrn\" (UniqueName: 
\"kubernetes.io/projected/0c45fd94-0ec2-4683-a66b-aef65f906ca5-kube-api-access-7qxrn\") pod \"dnsmasq-dns-5fdc957c47-knrqp\" (UID: \"0c45fd94-0ec2-4683-a66b-aef65f906ca5\") " pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" Oct 06 22:53:23 crc kubenswrapper[5014]: I1006 22:53:23.569416 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" Oct 06 22:53:24 crc kubenswrapper[5014]: I1006 22:53:24.001134 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fdc957c47-knrqp"] Oct 06 22:53:24 crc kubenswrapper[5014]: I1006 22:53:24.224331 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 06 22:53:24 crc kubenswrapper[5014]: I1006 22:53:24.774591 5014 generic.go:334] "Generic (PLEG): container finished" podID="0c45fd94-0ec2-4683-a66b-aef65f906ca5" containerID="a5320a55f37ebf1e8db77bf25880f6585e8867d1bd558b5063a580cb49ebdc7e" exitCode=0 Oct 06 22:53:24 crc kubenswrapper[5014]: I1006 22:53:24.774664 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" event={"ID":"0c45fd94-0ec2-4683-a66b-aef65f906ca5","Type":"ContainerDied","Data":"a5320a55f37ebf1e8db77bf25880f6585e8867d1bd558b5063a580cb49ebdc7e"} Oct 06 22:53:24 crc kubenswrapper[5014]: I1006 22:53:24.774991 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" event={"ID":"0c45fd94-0ec2-4683-a66b-aef65f906ca5","Type":"ContainerStarted","Data":"0245d83e5db0d5028a5352b4236d4ddfca70668d5e8f7716f9ae489d82b1ced3"} Oct 06 22:53:24 crc kubenswrapper[5014]: I1006 22:53:24.961769 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 22:53:25 crc kubenswrapper[5014]: I1006 22:53:25.783726 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" event={"ID":"0c45fd94-0ec2-4683-a66b-aef65f906ca5","Type":"ContainerStarted","Data":"5bf86b5cf828a134f35eab1b7203f0bd99ab1424df9e70da39f698064d6ffa0a"} Oct 06 22:53:25 crc kubenswrapper[5014]: I1006 22:53:25.784060 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" Oct 06 22:53:25 crc kubenswrapper[5014]: I1006 22:53:25.804389 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" podStartSLOduration=2.804370737 podStartE2EDuration="2.804370737s" podCreationTimestamp="2025-10-06 22:53:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:53:25.797589755 +0000 UTC m=+4951.090626509" watchObservedRunningTime="2025-10-06 22:53:25.804370737 +0000 UTC m=+4951.097407471" Oct 06 22:53:28 crc kubenswrapper[5014]: I1006 22:53:28.575274 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="d6fe3a81-97ea-4544-8747-edfe60d5ba74" containerName="rabbitmq" containerID="cri-o://44cb63b4460f3a076d1e22995245a49db394840db32443a76bbbd69db91e9d1a" gracePeriod=604796 Oct 06 22:53:29 crc kubenswrapper[5014]: I1006 22:53:29.096255 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="f8c2c614-a797-4f9f-94ed-40928e80fabb" containerName="rabbitmq" containerID="cri-o://0ce38e851dc67bfd3d5b49c7de43672726ef0720ab7257fba31cd18c3b1b62cf" gracePeriod=604796 Oct 06 22:53:33 crc 
kubenswrapper[5014]: I1006 22:53:33.571933 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" Oct 06 22:53:33 crc kubenswrapper[5014]: I1006 22:53:33.655966 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67d9f7fb89-2rzgz"] Oct 06 22:53:33 crc kubenswrapper[5014]: I1006 22:53:33.656552 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" podUID="bdd26dc8-c018-4079-9038-ca724382fc8f" containerName="dnsmasq-dns" containerID="cri-o://e02f8ffc6d119061a9c1bc58ac14197924b401566d3ac21999e06fe6640f3553" gracePeriod=10 Oct 06 22:53:33 crc kubenswrapper[5014]: I1006 22:53:33.878971 5014 generic.go:334] "Generic (PLEG): container finished" podID="bdd26dc8-c018-4079-9038-ca724382fc8f" containerID="e02f8ffc6d119061a9c1bc58ac14197924b401566d3ac21999e06fe6640f3553" exitCode=0 Oct 06 22:53:33 crc kubenswrapper[5014]: I1006 22:53:33.879015 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" event={"ID":"bdd26dc8-c018-4079-9038-ca724382fc8f","Type":"ContainerDied","Data":"e02f8ffc6d119061a9c1bc58ac14197924b401566d3ac21999e06fe6640f3553"} Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.116642 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.238672 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mt92s\" (UniqueName: \"kubernetes.io/projected/bdd26dc8-c018-4079-9038-ca724382fc8f-kube-api-access-mt92s\") pod \"bdd26dc8-c018-4079-9038-ca724382fc8f\" (UID: \"bdd26dc8-c018-4079-9038-ca724382fc8f\") " Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.238980 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bdd26dc8-c018-4079-9038-ca724382fc8f-config\") pod \"bdd26dc8-c018-4079-9038-ca724382fc8f\" (UID: \"bdd26dc8-c018-4079-9038-ca724382fc8f\") " Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.239045 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bdd26dc8-c018-4079-9038-ca724382fc8f-dns-svc\") pod \"bdd26dc8-c018-4079-9038-ca724382fc8f\" (UID: \"bdd26dc8-c018-4079-9038-ca724382fc8f\") " Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.255166 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdd26dc8-c018-4079-9038-ca724382fc8f-kube-api-access-mt92s" (OuterVolumeSpecName: "kube-api-access-mt92s") pod "bdd26dc8-c018-4079-9038-ca724382fc8f" (UID: "bdd26dc8-c018-4079-9038-ca724382fc8f"). InnerVolumeSpecName "kube-api-access-mt92s". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.282566 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bdd26dc8-c018-4079-9038-ca724382fc8f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bdd26dc8-c018-4079-9038-ca724382fc8f" (UID: "bdd26dc8-c018-4079-9038-ca724382fc8f"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.292074 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bdd26dc8-c018-4079-9038-ca724382fc8f-config" (OuterVolumeSpecName: "config") pod "bdd26dc8-c018-4079-9038-ca724382fc8f" (UID: "bdd26dc8-c018-4079-9038-ca724382fc8f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.341324 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bdd26dc8-c018-4079-9038-ca724382fc8f-config\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.341373 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bdd26dc8-c018-4079-9038-ca724382fc8f-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.341393 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mt92s\" (UniqueName: \"kubernetes.io/projected/bdd26dc8-c018-4079-9038-ca724382fc8f-kube-api-access-mt92s\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.889532 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" event={"ID":"bdd26dc8-c018-4079-9038-ca724382fc8f","Type":"ContainerDied","Data":"23baef8d4309a62c8f0773566eeb02da086f8c75d07af31b9d34389bae9cc172"} Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.889584 5014 scope.go:117] "RemoveContainer" containerID="e02f8ffc6d119061a9c1bc58ac14197924b401566d3ac21999e06fe6640f3553" Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.889614 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67d9f7fb89-2rzgz" Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.895038 5014 generic.go:334] "Generic (PLEG): container finished" podID="d6fe3a81-97ea-4544-8747-edfe60d5ba74" containerID="44cb63b4460f3a076d1e22995245a49db394840db32443a76bbbd69db91e9d1a" exitCode=0 Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.895117 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d6fe3a81-97ea-4544-8747-edfe60d5ba74","Type":"ContainerDied","Data":"44cb63b4460f3a076d1e22995245a49db394840db32443a76bbbd69db91e9d1a"} Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.922268 5014 scope.go:117] "RemoveContainer" containerID="f60e074e49b4de4096e4b43cd27f086e38957c24a5daaa3cf51255f305a9e63a" Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.965350 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67d9f7fb89-2rzgz"] Oct 06 22:53:34 crc kubenswrapper[5014]: I1006 22:53:34.976080 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67d9f7fb89-2rzgz"] Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.374559 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.482429 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de504b32-6a0f-469c-acf1-4778995e167d\") pod \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.482479 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-plugins\") pod \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.482546 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbrhz\" (UniqueName: \"kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-kube-api-access-tbrhz\") pod \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.482587 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d6fe3a81-97ea-4544-8747-edfe60d5ba74-erlang-cookie-secret\") pod \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.482716 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d6fe3a81-97ea-4544-8747-edfe60d5ba74-pod-info\") pod \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.482779 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-server-conf\") pod \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.482832 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-confd\") pod \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.483675 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-tls\") pod \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.483773 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-erlang-cookie\") pod \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.483780 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod 
"d6fe3a81-97ea-4544-8747-edfe60d5ba74" (UID: "d6fe3a81-97ea-4544-8747-edfe60d5ba74"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.483807 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-config-data\") pod \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.483884 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-plugins-conf\") pod \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\" (UID: \"d6fe3a81-97ea-4544-8747-edfe60d5ba74\") " Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.484326 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "d6fe3a81-97ea-4544-8747-edfe60d5ba74" (UID: "d6fe3a81-97ea-4544-8747-edfe60d5ba74"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.485271 5014 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.485290 5014 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.487732 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-kube-api-access-tbrhz" (OuterVolumeSpecName: "kube-api-access-tbrhz") pod "d6fe3a81-97ea-4544-8747-edfe60d5ba74" (UID: "d6fe3a81-97ea-4544-8747-edfe60d5ba74"). InnerVolumeSpecName "kube-api-access-tbrhz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.490310 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "d6fe3a81-97ea-4544-8747-edfe60d5ba74" (UID: "d6fe3a81-97ea-4544-8747-edfe60d5ba74"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.492479 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6fe3a81-97ea-4544-8747-edfe60d5ba74-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "d6fe3a81-97ea-4544-8747-edfe60d5ba74" (UID: "d6fe3a81-97ea-4544-8747-edfe60d5ba74"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.493330 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/d6fe3a81-97ea-4544-8747-edfe60d5ba74-pod-info" (OuterVolumeSpecName: "pod-info") pod "d6fe3a81-97ea-4544-8747-edfe60d5ba74" (UID: "d6fe3a81-97ea-4544-8747-edfe60d5ba74"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.499142 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bdd26dc8-c018-4079-9038-ca724382fc8f" path="/var/lib/kubelet/pods/bdd26dc8-c018-4079-9038-ca724382fc8f/volumes" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.505649 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "d6fe3a81-97ea-4544-8747-edfe60d5ba74" (UID: "d6fe3a81-97ea-4544-8747-edfe60d5ba74"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.509221 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de504b32-6a0f-469c-acf1-4778995e167d" (OuterVolumeSpecName: "persistence") pod "d6fe3a81-97ea-4544-8747-edfe60d5ba74" (UID: "d6fe3a81-97ea-4544-8747-edfe60d5ba74"). InnerVolumeSpecName "pvc-de504b32-6a0f-469c-acf1-4778995e167d". PluginName "kubernetes.io/csi", VolumeGidValue "" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.528936 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-config-data" (OuterVolumeSpecName: "config-data") pod "d6fe3a81-97ea-4544-8747-edfe60d5ba74" (UID: "d6fe3a81-97ea-4544-8747-edfe60d5ba74"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.534200 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-server-conf" (OuterVolumeSpecName: "server-conf") pod "d6fe3a81-97ea-4544-8747-edfe60d5ba74" (UID: "d6fe3a81-97ea-4544-8747-edfe60d5ba74"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.563472 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "d6fe3a81-97ea-4544-8747-edfe60d5ba74" (UID: "d6fe3a81-97ea-4544-8747-edfe60d5ba74"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.586396 5014 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-plugins-conf\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.586458 5014 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-de504b32-6a0f-469c-acf1-4778995e167d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de504b32-6a0f-469c-acf1-4778995e167d\") on node \"crc\" " Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.586470 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbrhz\" (UniqueName: \"kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-kube-api-access-tbrhz\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.586480 5014 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d6fe3a81-97ea-4544-8747-edfe60d5ba74-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.586488 5014 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d6fe3a81-97ea-4544-8747-edfe60d5ba74-pod-info\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.586516 5014 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-server-conf\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.586525 5014 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.586532 5014 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d6fe3a81-97ea-4544-8747-edfe60d5ba74-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.586541 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d6fe3a81-97ea-4544-8747-edfe60d5ba74-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.601397 5014 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.601545 5014 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-de504b32-6a0f-469c-acf1-4778995e167d" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de504b32-6a0f-469c-acf1-4778995e167d") on node "crc" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.688511 5014 reconciler_common.go:293] "Volume detached for volume \"pvc-de504b32-6a0f-469c-acf1-4778995e167d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de504b32-6a0f-469c-acf1-4778995e167d\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.924771 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"d6fe3a81-97ea-4544-8747-edfe60d5ba74","Type":"ContainerDied","Data":"1893ac85db2049b821abf2dbd297be005adf889075518d1c5b302f2823c02a71"} Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.926333 5014 scope.go:117] "RemoveContainer" containerID="44cb63b4460f3a076d1e22995245a49db394840db32443a76bbbd69db91e9d1a" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.925049 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.930144 5014 generic.go:334] "Generic (PLEG): container finished" podID="f8c2c614-a797-4f9f-94ed-40928e80fabb" containerID="0ce38e851dc67bfd3d5b49c7de43672726ef0720ab7257fba31cd18c3b1b62cf" exitCode=0 Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.930200 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f8c2c614-a797-4f9f-94ed-40928e80fabb","Type":"ContainerDied","Data":"0ce38e851dc67bfd3d5b49c7de43672726ef0720ab7257fba31cd18c3b1b62cf"} Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.957324 5014 scope.go:117] "RemoveContainer" containerID="7a3b275003c1e80a1658f457fb1decda90991bd3001dde391bd0e36bba6c9f41" Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.980300 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 06 22:53:35 crc kubenswrapper[5014]: I1006 22:53:35.980361 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.001413 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Oct 06 22:53:36 crc kubenswrapper[5014]: E1006 22:53:36.001835 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdd26dc8-c018-4079-9038-ca724382fc8f" containerName="init" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.001861 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdd26dc8-c018-4079-9038-ca724382fc8f" containerName="init" Oct 06 22:53:36 crc kubenswrapper[5014]: E1006 22:53:36.001876 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdd26dc8-c018-4079-9038-ca724382fc8f" containerName="dnsmasq-dns" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.001885 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdd26dc8-c018-4079-9038-ca724382fc8f" containerName="dnsmasq-dns" Oct 06 22:53:36 crc kubenswrapper[5014]: E1006 22:53:36.001906 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6fe3a81-97ea-4544-8747-edfe60d5ba74" containerName="rabbitmq" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.001915 5014 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="d6fe3a81-97ea-4544-8747-edfe60d5ba74" containerName="rabbitmq" Oct 06 22:53:36 crc kubenswrapper[5014]: E1006 22:53:36.001941 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6fe3a81-97ea-4544-8747-edfe60d5ba74" containerName="setup-container" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.001949 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6fe3a81-97ea-4544-8747-edfe60d5ba74" containerName="setup-container" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.002118 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6fe3a81-97ea-4544-8747-edfe60d5ba74" containerName="rabbitmq" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.002144 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdd26dc8-c018-4079-9038-ca724382fc8f" containerName="dnsmasq-dns" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.002983 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.012053 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.013467 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.013789 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.013980 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.014474 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.016149 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-mmvsr" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.016230 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.034509 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.098302 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.102833 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/aa637783-440d-4a1b-ada6-6bbf5be1bc84-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.102897 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/aa637783-440d-4a1b-ada6-6bbf5be1bc84-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.103014 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/aa637783-440d-4a1b-ada6-6bbf5be1bc84-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.103073 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/aa637783-440d-4a1b-ada6-6bbf5be1bc84-pod-info\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.103162 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xs868\" (UniqueName: \"kubernetes.io/projected/aa637783-440d-4a1b-ada6-6bbf5be1bc84-kube-api-access-xs868\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.103197 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/aa637783-440d-4a1b-ada6-6bbf5be1bc84-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.103260 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/aa637783-440d-4a1b-ada6-6bbf5be1bc84-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.103314 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-de504b32-6a0f-469c-acf1-4778995e167d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de504b32-6a0f-469c-acf1-4778995e167d\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.103457 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/aa637783-440d-4a1b-ada6-6bbf5be1bc84-server-conf\") pod \"rabbitmq-server-0\" (UID: 
\"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.103498 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aa637783-440d-4a1b-ada6-6bbf5be1bc84-config-data\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.103523 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/aa637783-440d-4a1b-ada6-6bbf5be1bc84-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204182 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-plugins\") pod \"f8c2c614-a797-4f9f-94ed-40928e80fabb\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204240 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f8c2c614-a797-4f9f-94ed-40928e80fabb-pod-info\") pod \"f8c2c614-a797-4f9f-94ed-40928e80fabb\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204286 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-confd\") pod \"f8c2c614-a797-4f9f-94ed-40928e80fabb\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204307 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-erlang-cookie\") pod \"f8c2c614-a797-4f9f-94ed-40928e80fabb\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204372 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-68dzr\" (UniqueName: \"kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-kube-api-access-68dzr\") pod \"f8c2c614-a797-4f9f-94ed-40928e80fabb\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204404 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f8c2c614-a797-4f9f-94ed-40928e80fabb-erlang-cookie-secret\") pod \"f8c2c614-a797-4f9f-94ed-40928e80fabb\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204428 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-tls\") pod \"f8c2c614-a797-4f9f-94ed-40928e80fabb\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204446 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-config-data\") pod \"f8c2c614-a797-4f9f-94ed-40928e80fabb\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204501 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-server-conf\") pod \"f8c2c614-a797-4f9f-94ed-40928e80fabb\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204689 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-feeffabf-0180-4772-a39d-d981e979241d\") pod \"f8c2c614-a797-4f9f-94ed-40928e80fabb\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204726 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-plugins-conf\") pod \"f8c2c614-a797-4f9f-94ed-40928e80fabb\" (UID: \"f8c2c614-a797-4f9f-94ed-40928e80fabb\") " Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204726 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "f8c2c614-a797-4f9f-94ed-40928e80fabb" (UID: "f8c2c614-a797-4f9f-94ed-40928e80fabb"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204866 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/aa637783-440d-4a1b-ada6-6bbf5be1bc84-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204904 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/aa637783-440d-4a1b-ada6-6bbf5be1bc84-pod-info\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204930 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xs868\" (UniqueName: \"kubernetes.io/projected/aa637783-440d-4a1b-ada6-6bbf5be1bc84-kube-api-access-xs868\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204950 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/aa637783-440d-4a1b-ada6-6bbf5be1bc84-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.204973 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/aa637783-440d-4a1b-ada6-6bbf5be1bc84-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 
crc kubenswrapper[5014]: I1006 22:53:36.205016 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-de504b32-6a0f-469c-acf1-4778995e167d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de504b32-6a0f-469c-acf1-4778995e167d\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.205068 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/aa637783-440d-4a1b-ada6-6bbf5be1bc84-server-conf\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.205088 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aa637783-440d-4a1b-ada6-6bbf5be1bc84-config-data\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.205106 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/aa637783-440d-4a1b-ada6-6bbf5be1bc84-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.205138 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/aa637783-440d-4a1b-ada6-6bbf5be1bc84-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.205161 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/aa637783-440d-4a1b-ada6-6bbf5be1bc84-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.205221 5014 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.205547 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/aa637783-440d-4a1b-ada6-6bbf5be1bc84-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.206560 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "f8c2c614-a797-4f9f-94ed-40928e80fabb" (UID: "f8c2c614-a797-4f9f-94ed-40928e80fabb"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.207667 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/f8c2c614-a797-4f9f-94ed-40928e80fabb-pod-info" (OuterVolumeSpecName: "pod-info") pod "f8c2c614-a797-4f9f-94ed-40928e80fabb" (UID: "f8c2c614-a797-4f9f-94ed-40928e80fabb"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.208663 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/aa637783-440d-4a1b-ada6-6bbf5be1bc84-server-conf\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.208679 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8c2c614-a797-4f9f-94ed-40928e80fabb-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "f8c2c614-a797-4f9f-94ed-40928e80fabb" (UID: "f8c2c614-a797-4f9f-94ed-40928e80fabb"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.209301 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "f8c2c614-a797-4f9f-94ed-40928e80fabb" (UID: "f8c2c614-a797-4f9f-94ed-40928e80fabb"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.209539 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/aa637783-440d-4a1b-ada6-6bbf5be1bc84-pod-info\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.209843 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aa637783-440d-4a1b-ada6-6bbf5be1bc84-config-data\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.209846 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/aa637783-440d-4a1b-ada6-6bbf5be1bc84-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.210784 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/aa637783-440d-4a1b-ada6-6bbf5be1bc84-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.212367 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "f8c2c614-a797-4f9f-94ed-40928e80fabb" (UID: "f8c2c614-a797-4f9f-94ed-40928e80fabb"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.212756 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-kube-api-access-68dzr" (OuterVolumeSpecName: "kube-api-access-68dzr") pod "f8c2c614-a797-4f9f-94ed-40928e80fabb" (UID: "f8c2c614-a797-4f9f-94ed-40928e80fabb"). InnerVolumeSpecName "kube-api-access-68dzr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.214928 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/aa637783-440d-4a1b-ada6-6bbf5be1bc84-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.215425 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-feeffabf-0180-4772-a39d-d981e979241d" (OuterVolumeSpecName: "persistence") pod "f8c2c614-a797-4f9f-94ed-40928e80fabb" (UID: "f8c2c614-a797-4f9f-94ed-40928e80fabb"). InnerVolumeSpecName "pvc-feeffabf-0180-4772-a39d-d981e979241d". PluginName "kubernetes.io/csi", VolumeGidValue "" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.216237 5014 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.216388 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-de504b32-6a0f-469c-acf1-4778995e167d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de504b32-6a0f-469c-acf1-4778995e167d\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b3f573573d4185f181f377a5e7547ecfec13579b863acd5df80498214dc179e0/globalmount\"" pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.218051 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/aa637783-440d-4a1b-ada6-6bbf5be1bc84-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.220567 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/aa637783-440d-4a1b-ada6-6bbf5be1bc84-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.232273 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xs868\" (UniqueName: \"kubernetes.io/projected/aa637783-440d-4a1b-ada6-6bbf5be1bc84-kube-api-access-xs868\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.242393 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-config-data" (OuterVolumeSpecName: "config-data") pod "f8c2c614-a797-4f9f-94ed-40928e80fabb" (UID: 
"f8c2c614-a797-4f9f-94ed-40928e80fabb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.250415 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-server-conf" (OuterVolumeSpecName: "server-conf") pod "f8c2c614-a797-4f9f-94ed-40928e80fabb" (UID: "f8c2c614-a797-4f9f-94ed-40928e80fabb"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.258994 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-de504b32-6a0f-469c-acf1-4778995e167d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-de504b32-6a0f-469c-acf1-4778995e167d\") pod \"rabbitmq-server-0\" (UID: \"aa637783-440d-4a1b-ada6-6bbf5be1bc84\") " pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.296566 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "f8c2c614-a797-4f9f-94ed-40928e80fabb" (UID: "f8c2c614-a797-4f9f-94ed-40928e80fabb"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.306547 5014 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.306576 5014 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.306591 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-68dzr\" (UniqueName: \"kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-kube-api-access-68dzr\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.306601 5014 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f8c2c614-a797-4f9f-94ed-40928e80fabb-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.306613 5014 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f8c2c614-a797-4f9f-94ed-40928e80fabb-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.306640 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.306650 5014 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-server-conf\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.306702 5014 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-feeffabf-0180-4772-a39d-d981e979241d\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-feeffabf-0180-4772-a39d-d981e979241d\") on node \"crc\" " Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.306717 5014 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f8c2c614-a797-4f9f-94ed-40928e80fabb-plugins-conf\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.306728 5014 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f8c2c614-a797-4f9f-94ed-40928e80fabb-pod-info\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.324326 5014 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.324506 5014 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-feeffabf-0180-4772-a39d-d981e979241d" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-feeffabf-0180-4772-a39d-d981e979241d") on node "crc" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.396967 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.407812 5014 reconciler_common.go:293] "Volume detached for volume \"pvc-feeffabf-0180-4772-a39d-d981e979241d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-feeffabf-0180-4772-a39d-d981e979241d\") on node \"crc\" DevicePath \"\"" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.657469 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 06 22:53:36 crc kubenswrapper[5014]: W1006 22:53:36.662236 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaa637783_440d_4a1b_ada6_6bbf5be1bc84.slice/crio-3a0637c7508640b664273ca8fd9af0e3d6c9c1c8f84b37f5c9f9278dea3ff25d WatchSource:0}: Error finding container 3a0637c7508640b664273ca8fd9af0e3d6c9c1c8f84b37f5c9f9278dea3ff25d: Status 404 returned error can't find the container with id 3a0637c7508640b664273ca8fd9af0e3d6c9c1c8f84b37f5c9f9278dea3ff25d Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.940441 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f8c2c614-a797-4f9f-94ed-40928e80fabb","Type":"ContainerDied","Data":"af1021d1652c761cd4501206f46a8386593b81e3dd3b1538ffd11764b1f025d4"} Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.940488 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.940849 5014 scope.go:117] "RemoveContainer" containerID="0ce38e851dc67bfd3d5b49c7de43672726ef0720ab7257fba31cd18c3b1b62cf" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.943114 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"aa637783-440d-4a1b-ada6-6bbf5be1bc84","Type":"ContainerStarted","Data":"3a0637c7508640b664273ca8fd9af0e3d6c9c1c8f84b37f5c9f9278dea3ff25d"} Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.962769 5014 scope.go:117] "RemoveContainer" containerID="c1b0b7250b2aca3d4010e3a0efe30d3b3c2529f64db97756e81e2a31a4352e3a" Oct 06 22:53:36 crc kubenswrapper[5014]: I1006 22:53:36.999685 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.006095 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.079790 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 22:53:37 crc kubenswrapper[5014]: E1006 22:53:37.080223 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8c2c614-a797-4f9f-94ed-40928e80fabb" containerName="setup-container" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.080248 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8c2c614-a797-4f9f-94ed-40928e80fabb" containerName="setup-container" Oct 06 22:53:37 crc kubenswrapper[5014]: E1006 22:53:37.080269 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8c2c614-a797-4f9f-94ed-40928e80fabb" containerName="rabbitmq" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.080278 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8c2c614-a797-4f9f-94ed-40928e80fabb" containerName="rabbitmq" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.080476 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8c2c614-a797-4f9f-94ed-40928e80fabb" containerName="rabbitmq" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.081522 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.083297 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.085080 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.085482 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.085773 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.086206 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-v28mf" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.086547 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.086806 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.086804 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.218421 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.218454 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.218492 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6fww\" (UniqueName: \"kubernetes.io/projected/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-kube-api-access-r6fww\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.218515 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.218535 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-feeffabf-0180-4772-a39d-d981e979241d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-feeffabf-0180-4772-a39d-d981e979241d\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.218553 
5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.218582 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.218601 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.218652 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.218680 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.218707 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.320539 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.320654 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.320726 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6fww\" (UniqueName: \"kubernetes.io/projected/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-kube-api-access-r6fww\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.320798 5014 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.320837 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-feeffabf-0180-4772-a39d-d981e979241d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-feeffabf-0180-4772-a39d-d981e979241d\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.320877 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.320947 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.321001 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.321067 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.321500 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.321927 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.323286 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.324005 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/configmap/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.325101 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.325767 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.326301 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.328788 5014 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.329235 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-feeffabf-0180-4772-a39d-d981e979241d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-feeffabf-0180-4772-a39d-d981e979241d\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/d8fdada76f3df58e0c1d0071a9e2c1d2e798bdf0cb0b90a59d90c980b534487e/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.328819 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.330705 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.332603 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.339685 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " 
pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.354561 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6fww\" (UniqueName: \"kubernetes.io/projected/ece821f8-f5e4-4e03-8dd6-fab61ba9eca9-kube-api-access-r6fww\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.389676 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-feeffabf-0180-4772-a39d-d981e979241d\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-feeffabf-0180-4772-a39d-d981e979241d\") pod \"rabbitmq-cell1-server-0\" (UID: \"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9\") " pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.507219 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6fe3a81-97ea-4544-8747-edfe60d5ba74" path="/var/lib/kubelet/pods/d6fe3a81-97ea-4544-8747-edfe60d5ba74/volumes" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.509171 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8c2c614-a797-4f9f-94ed-40928e80fabb" path="/var/lib/kubelet/pods/f8c2c614-a797-4f9f-94ed-40928e80fabb/volumes" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.697495 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:53:37 crc kubenswrapper[5014]: I1006 22:53:37.959463 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"aa637783-440d-4a1b-ada6-6bbf5be1bc84","Type":"ContainerStarted","Data":"388b94eda2fcd3b5a75fff8db717a351c9849974fc1a82db8f08bef411e01118"} Oct 06 22:53:38 crc kubenswrapper[5014]: I1006 22:53:38.234493 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 06 22:53:38 crc kubenswrapper[5014]: W1006 22:53:38.253981 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podece821f8_f5e4_4e03_8dd6_fab61ba9eca9.slice/crio-5913be869f57164e7795317c0c7f20e891c57b4e3fd701b60aeb87ebc4e29ef3 WatchSource:0}: Error finding container 5913be869f57164e7795317c0c7f20e891c57b4e3fd701b60aeb87ebc4e29ef3: Status 404 returned error can't find the container with id 5913be869f57164e7795317c0c7f20e891c57b4e3fd701b60aeb87ebc4e29ef3 Oct 06 22:53:38 crc kubenswrapper[5014]: I1006 22:53:38.967859 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9","Type":"ContainerStarted","Data":"5913be869f57164e7795317c0c7f20e891c57b4e3fd701b60aeb87ebc4e29ef3"} Oct 06 22:53:40 crc kubenswrapper[5014]: I1006 22:53:40.989061 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9","Type":"ContainerStarted","Data":"1635a0022de2746cd72301caa5c4047eb93d94e2436cad8a48c0ea6d7e27326c"} Oct 06 22:53:51 crc kubenswrapper[5014]: I1006 22:53:51.735760 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:53:51 crc kubenswrapper[5014]: I1006 
22:53:51.736493 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:54:12 crc kubenswrapper[5014]: I1006 22:54:12.309394 5014 generic.go:334] "Generic (PLEG): container finished" podID="aa637783-440d-4a1b-ada6-6bbf5be1bc84" containerID="388b94eda2fcd3b5a75fff8db717a351c9849974fc1a82db8f08bef411e01118" exitCode=0 Oct 06 22:54:12 crc kubenswrapper[5014]: I1006 22:54:12.309508 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"aa637783-440d-4a1b-ada6-6bbf5be1bc84","Type":"ContainerDied","Data":"388b94eda2fcd3b5a75fff8db717a351c9849974fc1a82db8f08bef411e01118"} Oct 06 22:54:13 crc kubenswrapper[5014]: I1006 22:54:13.324957 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"aa637783-440d-4a1b-ada6-6bbf5be1bc84","Type":"ContainerStarted","Data":"bf30db74596e7e277909fd05e4bfc95d6771c0229ee8c43b8e2aae48cdb6c3c0"} Oct 06 22:54:13 crc kubenswrapper[5014]: I1006 22:54:13.325858 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Oct 06 22:54:13 crc kubenswrapper[5014]: I1006 22:54:13.372066 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.372048134 podStartE2EDuration="38.372048134s" podCreationTimestamp="2025-10-06 22:53:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:54:13.369748052 +0000 UTC m=+4998.662784816" watchObservedRunningTime="2025-10-06 22:54:13.372048134 +0000 UTC m=+4998.665084878" Oct 06 22:54:14 crc kubenswrapper[5014]: I1006 22:54:14.337412 5014 generic.go:334] "Generic (PLEG): container finished" podID="ece821f8-f5e4-4e03-8dd6-fab61ba9eca9" containerID="1635a0022de2746cd72301caa5c4047eb93d94e2436cad8a48c0ea6d7e27326c" exitCode=0 Oct 06 22:54:14 crc kubenswrapper[5014]: I1006 22:54:14.337516 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9","Type":"ContainerDied","Data":"1635a0022de2746cd72301caa5c4047eb93d94e2436cad8a48c0ea6d7e27326c"} Oct 06 22:54:15 crc kubenswrapper[5014]: I1006 22:54:15.349810 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ece821f8-f5e4-4e03-8dd6-fab61ba9eca9","Type":"ContainerStarted","Data":"f394314c247052eb6597c1c6acf5cb8df79e3531801be2bf66a1caa44ce9cbde"} Oct 06 22:54:15 crc kubenswrapper[5014]: I1006 22:54:15.351479 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Oct 06 22:54:15 crc kubenswrapper[5014]: I1006 22:54:15.389097 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.389068675 podStartE2EDuration="38.389068675s" podCreationTimestamp="2025-10-06 22:53:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:54:15.383876512 +0000 UTC m=+5000.676913276" watchObservedRunningTime="2025-10-06 22:54:15.389068675 +0000 UTC m=+5000.682105449" Oct 06 
22:54:21 crc kubenswrapper[5014]: I1006 22:54:21.735753 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 22:54:21 crc kubenswrapper[5014]: I1006 22:54:21.736684 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 22:54:26 crc kubenswrapper[5014]: I1006 22:54:26.401893 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Oct 06 22:54:27 crc kubenswrapper[5014]: I1006 22:54:27.699834 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Oct 06 22:54:30 crc kubenswrapper[5014]: I1006 22:54:30.644545 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1-default"]
Oct 06 22:54:30 crc kubenswrapper[5014]: I1006 22:54:30.645712 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default"
Oct 06 22:54:30 crc kubenswrapper[5014]: I1006 22:54:30.648061 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-5plzd"
Oct 06 22:54:30 crc kubenswrapper[5014]: I1006 22:54:30.655418 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"]
Oct 06 22:54:30 crc kubenswrapper[5014]: I1006 22:54:30.735361 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rblvk\" (UniqueName: \"kubernetes.io/projected/c3eb36a0-eb26-4ba9-a82c-211d83af7028-kube-api-access-rblvk\") pod \"mariadb-client-1-default\" (UID: \"c3eb36a0-eb26-4ba9-a82c-211d83af7028\") " pod="openstack/mariadb-client-1-default"
Oct 06 22:54:30 crc kubenswrapper[5014]: I1006 22:54:30.836413 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rblvk\" (UniqueName: \"kubernetes.io/projected/c3eb36a0-eb26-4ba9-a82c-211d83af7028-kube-api-access-rblvk\") pod \"mariadb-client-1-default\" (UID: \"c3eb36a0-eb26-4ba9-a82c-211d83af7028\") " pod="openstack/mariadb-client-1-default"
Oct 06 22:54:30 crc kubenswrapper[5014]: I1006 22:54:30.859428 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rblvk\" (UniqueName: \"kubernetes.io/projected/c3eb36a0-eb26-4ba9-a82c-211d83af7028-kube-api-access-rblvk\") pod \"mariadb-client-1-default\" (UID: \"c3eb36a0-eb26-4ba9-a82c-211d83af7028\") " pod="openstack/mariadb-client-1-default"
Oct 06 22:54:30 crc kubenswrapper[5014]: I1006 22:54:30.964205 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default"
Oct 06 22:54:31 crc kubenswrapper[5014]: I1006 22:54:31.581765 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"]
Oct 06 22:54:31 crc kubenswrapper[5014]: W1006 22:54:31.589730 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc3eb36a0_eb26_4ba9_a82c_211d83af7028.slice/crio-0b74403e5f69910b6b9c119a04c819f0293db8bcbfe8776b1c638990abaeb10e WatchSource:0}: Error finding container 0b74403e5f69910b6b9c119a04c819f0293db8bcbfe8776b1c638990abaeb10e: Status 404 returned error can't find the container with id 0b74403e5f69910b6b9c119a04c819f0293db8bcbfe8776b1c638990abaeb10e
Oct 06 22:54:32 crc kubenswrapper[5014]: I1006 22:54:32.513320 5014 generic.go:334] "Generic (PLEG): container finished" podID="c3eb36a0-eb26-4ba9-a82c-211d83af7028" containerID="c5f6328fd1274d2918237fbf91acbfebe8e5f40d9f7a3f160048bfc5ab3529f2" exitCode=0
Oct 06 22:54:32 crc kubenswrapper[5014]: I1006 22:54:32.513407 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"c3eb36a0-eb26-4ba9-a82c-211d83af7028","Type":"ContainerDied","Data":"c5f6328fd1274d2918237fbf91acbfebe8e5f40d9f7a3f160048bfc5ab3529f2"}
Oct 06 22:54:32 crc kubenswrapper[5014]: I1006 22:54:32.513739 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"c3eb36a0-eb26-4ba9-a82c-211d83af7028","Type":"ContainerStarted","Data":"0b74403e5f69910b6b9c119a04c819f0293db8bcbfe8776b1c638990abaeb10e"}
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.019287 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default"
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.058523 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1-default_c3eb36a0-eb26-4ba9-a82c-211d83af7028/mariadb-client-1-default/0.log"
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.087506 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1-default"]
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.099172 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1-default"]
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.204139 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rblvk\" (UniqueName: \"kubernetes.io/projected/c3eb36a0-eb26-4ba9-a82c-211d83af7028-kube-api-access-rblvk\") pod \"c3eb36a0-eb26-4ba9-a82c-211d83af7028\" (UID: \"c3eb36a0-eb26-4ba9-a82c-211d83af7028\") "
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.212345 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3eb36a0-eb26-4ba9-a82c-211d83af7028-kube-api-access-rblvk" (OuterVolumeSpecName: "kube-api-access-rblvk") pod "c3eb36a0-eb26-4ba9-a82c-211d83af7028" (UID: "c3eb36a0-eb26-4ba9-a82c-211d83af7028"). InnerVolumeSpecName "kube-api-access-rblvk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.306841 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rblvk\" (UniqueName: \"kubernetes.io/projected/c3eb36a0-eb26-4ba9-a82c-211d83af7028-kube-api-access-rblvk\") on node \"crc\" DevicePath \"\""
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.549196 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b74403e5f69910b6b9c119a04c819f0293db8bcbfe8776b1c638990abaeb10e"
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.549295 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default"
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.677236 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2-default"]
Oct 06 22:54:34 crc kubenswrapper[5014]: E1006 22:54:34.677832 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3eb36a0-eb26-4ba9-a82c-211d83af7028" containerName="mariadb-client-1-default"
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.677864 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3eb36a0-eb26-4ba9-a82c-211d83af7028" containerName="mariadb-client-1-default"
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.678149 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3eb36a0-eb26-4ba9-a82c-211d83af7028" containerName="mariadb-client-1-default"
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.679050 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default"
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.683140 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-5plzd"
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.692096 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"]
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.815524 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdqq9\" (UniqueName: \"kubernetes.io/projected/c9c68dcc-b130-4033-89b4-04a771a350c0-kube-api-access-jdqq9\") pod \"mariadb-client-2-default\" (UID: \"c9c68dcc-b130-4033-89b4-04a771a350c0\") " pod="openstack/mariadb-client-2-default"
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.917393 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdqq9\" (UniqueName: \"kubernetes.io/projected/c9c68dcc-b130-4033-89b4-04a771a350c0-kube-api-access-jdqq9\") pod \"mariadb-client-2-default\" (UID: \"c9c68dcc-b130-4033-89b4-04a771a350c0\") " pod="openstack/mariadb-client-2-default"
Oct 06 22:54:34 crc kubenswrapper[5014]: I1006 22:54:34.947172 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdqq9\" (UniqueName: \"kubernetes.io/projected/c9c68dcc-b130-4033-89b4-04a771a350c0-kube-api-access-jdqq9\") pod \"mariadb-client-2-default\" (UID: \"c9c68dcc-b130-4033-89b4-04a771a350c0\") " pod="openstack/mariadb-client-2-default"
Oct 06 22:54:35 crc kubenswrapper[5014]: I1006 22:54:35.049321 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default"
Oct 06 22:54:35 crc kubenswrapper[5014]: I1006 22:54:35.439685 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"]
Oct 06 22:54:35 crc kubenswrapper[5014]: I1006 22:54:35.497065 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3eb36a0-eb26-4ba9-a82c-211d83af7028" path="/var/lib/kubelet/pods/c3eb36a0-eb26-4ba9-a82c-211d83af7028/volumes"
Oct 06 22:54:35 crc kubenswrapper[5014]: I1006 22:54:35.558991 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"c9c68dcc-b130-4033-89b4-04a771a350c0","Type":"ContainerStarted","Data":"b85ae65bbbc347ea784f270e4870926575e31c08251a1bf95982c7f0ff75c7e5"}
Oct 06 22:54:36 crc kubenswrapper[5014]: I1006 22:54:36.571553 5014 generic.go:334] "Generic (PLEG): container finished" podID="c9c68dcc-b130-4033-89b4-04a771a350c0" containerID="72c52a6401df1c76feeac8a62b43e0dff7c8b06dbb2b59d2e4fac76d33a441a0" exitCode=0
Oct 06 22:54:36 crc kubenswrapper[5014]: I1006 22:54:36.571673 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"c9c68dcc-b130-4033-89b4-04a771a350c0","Type":"ContainerDied","Data":"72c52a6401df1c76feeac8a62b43e0dff7c8b06dbb2b59d2e4fac76d33a441a0"}
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.105488 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default"
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.159983 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2-default_c9c68dcc-b130-4033-89b4-04a771a350c0/mariadb-client-2-default/0.log"
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.210817 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2-default"]
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.213893 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2-default"]
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.283604 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdqq9\" (UniqueName: \"kubernetes.io/projected/c9c68dcc-b130-4033-89b4-04a771a350c0-kube-api-access-jdqq9\") pod \"c9c68dcc-b130-4033-89b4-04a771a350c0\" (UID: \"c9c68dcc-b130-4033-89b4-04a771a350c0\") "
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.292117 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9c68dcc-b130-4033-89b4-04a771a350c0-kube-api-access-jdqq9" (OuterVolumeSpecName: "kube-api-access-jdqq9") pod "c9c68dcc-b130-4033-89b4-04a771a350c0" (UID: "c9c68dcc-b130-4033-89b4-04a771a350c0"). InnerVolumeSpecName "kube-api-access-jdqq9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.385948 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdqq9\" (UniqueName: \"kubernetes.io/projected/c9c68dcc-b130-4033-89b4-04a771a350c0-kube-api-access-jdqq9\") on node \"crc\" DevicePath \"\""
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.600974 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b85ae65bbbc347ea784f270e4870926575e31c08251a1bf95982c7f0ff75c7e5"
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.601010 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default"
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.805665 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1"]
Oct 06 22:54:38 crc kubenswrapper[5014]: E1006 22:54:38.806331 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9c68dcc-b130-4033-89b4-04a771a350c0" containerName="mariadb-client-2-default"
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.806390 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9c68dcc-b130-4033-89b4-04a771a350c0" containerName="mariadb-client-2-default"
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.806842 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9c68dcc-b130-4033-89b4-04a771a350c0" containerName="mariadb-client-2-default"
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.808091 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1"
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.813476 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-5plzd"
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.827311 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"]
Oct 06 22:54:38 crc kubenswrapper[5014]: I1006 22:54:38.997262 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnr57\" (UniqueName: \"kubernetes.io/projected/ca8ed2f2-f117-45c9-b5fd-4248e40dae55-kube-api-access-vnr57\") pod \"mariadb-client-1\" (UID: \"ca8ed2f2-f117-45c9-b5fd-4248e40dae55\") " pod="openstack/mariadb-client-1"
Oct 06 22:54:39 crc kubenswrapper[5014]: I1006 22:54:39.099464 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnr57\" (UniqueName: \"kubernetes.io/projected/ca8ed2f2-f117-45c9-b5fd-4248e40dae55-kube-api-access-vnr57\") pod \"mariadb-client-1\" (UID: \"ca8ed2f2-f117-45c9-b5fd-4248e40dae55\") " pod="openstack/mariadb-client-1"
Oct 06 22:54:39 crc kubenswrapper[5014]: I1006 22:54:39.129576 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnr57\" (UniqueName: \"kubernetes.io/projected/ca8ed2f2-f117-45c9-b5fd-4248e40dae55-kube-api-access-vnr57\") pod \"mariadb-client-1\" (UID: \"ca8ed2f2-f117-45c9-b5fd-4248e40dae55\") " pod="openstack/mariadb-client-1"
Oct 06 22:54:39 crc kubenswrapper[5014]: I1006 22:54:39.143853 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1"
Oct 06 22:54:39 crc kubenswrapper[5014]: I1006 22:54:39.502003 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9c68dcc-b130-4033-89b4-04a771a350c0" path="/var/lib/kubelet/pods/c9c68dcc-b130-4033-89b4-04a771a350c0/volumes"
Oct 06 22:54:39 crc kubenswrapper[5014]: I1006 22:54:39.837081 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"]
Oct 06 22:54:39 crc kubenswrapper[5014]: W1006 22:54:39.843959 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca8ed2f2_f117_45c9_b5fd_4248e40dae55.slice/crio-b74bb923131d68cc0f15dfa81a66d0835ef3b86006b2704bd033542aeff480f7 WatchSource:0}: Error finding container b74bb923131d68cc0f15dfa81a66d0835ef3b86006b2704bd033542aeff480f7: Status 404 returned error can't find the container with id b74bb923131d68cc0f15dfa81a66d0835ef3b86006b2704bd033542aeff480f7
Oct 06 22:54:40 crc kubenswrapper[5014]: I1006 22:54:40.620245 5014 generic.go:334] "Generic (PLEG): container finished" podID="ca8ed2f2-f117-45c9-b5fd-4248e40dae55" containerID="bfc96154553144da4a7277d75d34655ef9da34eb76310f80b6b678fc49d4902b" exitCode=0
Oct 06 22:54:40 crc kubenswrapper[5014]: I1006 22:54:40.620345 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"ca8ed2f2-f117-45c9-b5fd-4248e40dae55","Type":"ContainerDied","Data":"bfc96154553144da4a7277d75d34655ef9da34eb76310f80b6b678fc49d4902b"}
Oct 06 22:54:40 crc kubenswrapper[5014]: I1006 22:54:40.620709 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"ca8ed2f2-f117-45c9-b5fd-4248e40dae55","Type":"ContainerStarted","Data":"b74bb923131d68cc0f15dfa81a66d0835ef3b86006b2704bd033542aeff480f7"}
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.151935 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1"
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.172310 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1_ca8ed2f2-f117-45c9-b5fd-4248e40dae55/mariadb-client-1/0.log"
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.213272 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1"]
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.225288 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1"]
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.278760 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vnr57\" (UniqueName: \"kubernetes.io/projected/ca8ed2f2-f117-45c9-b5fd-4248e40dae55-kube-api-access-vnr57\") pod \"ca8ed2f2-f117-45c9-b5fd-4248e40dae55\" (UID: \"ca8ed2f2-f117-45c9-b5fd-4248e40dae55\") "
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.287389 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca8ed2f2-f117-45c9-b5fd-4248e40dae55-kube-api-access-vnr57" (OuterVolumeSpecName: "kube-api-access-vnr57") pod "ca8ed2f2-f117-45c9-b5fd-4248e40dae55" (UID: "ca8ed2f2-f117-45c9-b5fd-4248e40dae55"). InnerVolumeSpecName "kube-api-access-vnr57". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.381501 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vnr57\" (UniqueName: \"kubernetes.io/projected/ca8ed2f2-f117-45c9-b5fd-4248e40dae55-kube-api-access-vnr57\") on node \"crc\" DevicePath \"\""
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.646351 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b74bb923131d68cc0f15dfa81a66d0835ef3b86006b2704bd033542aeff480f7"
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.646448 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1"
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.777273 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-4-default"]
Oct 06 22:54:42 crc kubenswrapper[5014]: E1006 22:54:42.777924 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca8ed2f2-f117-45c9-b5fd-4248e40dae55" containerName="mariadb-client-1"
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.777967 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca8ed2f2-f117-45c9-b5fd-4248e40dae55" containerName="mariadb-client-1"
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.778294 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca8ed2f2-f117-45c9-b5fd-4248e40dae55" containerName="mariadb-client-1"
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.779194 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default"
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.782783 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-5plzd"
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.789282 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpb8n\" (UniqueName: \"kubernetes.io/projected/4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645-kube-api-access-xpb8n\") pod \"mariadb-client-4-default\" (UID: \"4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645\") " pod="openstack/mariadb-client-4-default"
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.797094 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"]
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.891643 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpb8n\" (UniqueName: \"kubernetes.io/projected/4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645-kube-api-access-xpb8n\") pod \"mariadb-client-4-default\" (UID: \"4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645\") " pod="openstack/mariadb-client-4-default"
Oct 06 22:54:42 crc kubenswrapper[5014]: I1006 22:54:42.927269 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpb8n\" (UniqueName: \"kubernetes.io/projected/4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645-kube-api-access-xpb8n\") pod \"mariadb-client-4-default\" (UID: \"4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645\") " pod="openstack/mariadb-client-4-default"
Oct 06 22:54:43 crc kubenswrapper[5014]: I1006 22:54:43.109470 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default"
Oct 06 22:54:43 crc kubenswrapper[5014]: I1006 22:54:43.496112 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca8ed2f2-f117-45c9-b5fd-4248e40dae55" path="/var/lib/kubelet/pods/ca8ed2f2-f117-45c9-b5fd-4248e40dae55/volumes"
Oct 06 22:54:43 crc kubenswrapper[5014]: I1006 22:54:43.664713 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"]
Oct 06 22:54:43 crc kubenswrapper[5014]: W1006 22:54:43.671175 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4bf20e3a_45c6_4ea0_8fa8_6f8a62be8645.slice/crio-978e556f8b55f6391750ae8d7cadc0b884bff7d3993fa525810a97653d9d7eaf WatchSource:0}: Error finding container 978e556f8b55f6391750ae8d7cadc0b884bff7d3993fa525810a97653d9d7eaf: Status 404 returned error can't find the container with id 978e556f8b55f6391750ae8d7cadc0b884bff7d3993fa525810a97653d9d7eaf
Oct 06 22:54:44 crc kubenswrapper[5014]: I1006 22:54:44.670491 5014 generic.go:334] "Generic (PLEG): container finished" podID="4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645" containerID="66db457509f91e6efa3a78d6013576053423c7db985ff0315d453497182e2549" exitCode=0
Oct 06 22:54:44 crc kubenswrapper[5014]: I1006 22:54:44.670652 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645","Type":"ContainerDied","Data":"66db457509f91e6efa3a78d6013576053423c7db985ff0315d453497182e2549"}
Oct 06 22:54:44 crc kubenswrapper[5014]: I1006 22:54:44.670831 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645","Type":"ContainerStarted","Data":"978e556f8b55f6391750ae8d7cadc0b884bff7d3993fa525810a97653d9d7eaf"}
Oct 06 22:54:46 crc kubenswrapper[5014]: I1006 22:54:46.199225 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default"
Oct 06 22:54:46 crc kubenswrapper[5014]: I1006 22:54:46.220282 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-4-default_4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645/mariadb-client-4-default/0.log"
Oct 06 22:54:46 crc kubenswrapper[5014]: I1006 22:54:46.251341 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-4-default"]
Oct 06 22:54:46 crc kubenswrapper[5014]: I1006 22:54:46.254331 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xpb8n\" (UniqueName: \"kubernetes.io/projected/4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645-kube-api-access-xpb8n\") pod \"4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645\" (UID: \"4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645\") "
Oct 06 22:54:46 crc kubenswrapper[5014]: I1006 22:54:46.259057 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-4-default"]
Oct 06 22:54:46 crc kubenswrapper[5014]: I1006 22:54:46.263027 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645-kube-api-access-xpb8n" (OuterVolumeSpecName: "kube-api-access-xpb8n") pod "4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645" (UID: "4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645"). InnerVolumeSpecName "kube-api-access-xpb8n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:54:46 crc kubenswrapper[5014]: I1006 22:54:46.356475 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xpb8n\" (UniqueName: \"kubernetes.io/projected/4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645-kube-api-access-xpb8n\") on node \"crc\" DevicePath \"\""
Oct 06 22:54:46 crc kubenswrapper[5014]: I1006 22:54:46.692562 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="978e556f8b55f6391750ae8d7cadc0b884bff7d3993fa525810a97653d9d7eaf"
Oct 06 22:54:46 crc kubenswrapper[5014]: I1006 22:54:46.692654 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default"
Oct 06 22:54:47 crc kubenswrapper[5014]: I1006 22:54:47.503679 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645" path="/var/lib/kubelet/pods/4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645/volumes"
Oct 06 22:54:50 crc kubenswrapper[5014]: I1006 22:54:50.566868 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-5-default"]
Oct 06 22:54:50 crc kubenswrapper[5014]: E1006 22:54:50.567539 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645" containerName="mariadb-client-4-default"
Oct 06 22:54:50 crc kubenswrapper[5014]: I1006 22:54:50.567569 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645" containerName="mariadb-client-4-default"
Oct 06 22:54:50 crc kubenswrapper[5014]: I1006 22:54:50.567999 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="4bf20e3a-45c6-4ea0-8fa8-6f8a62be8645" containerName="mariadb-client-4-default"
Oct 06 22:54:50 crc kubenswrapper[5014]: I1006 22:54:50.569092 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default"
Oct 06 22:54:50 crc kubenswrapper[5014]: I1006 22:54:50.577352 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-5plzd"
Oct 06 22:54:50 crc kubenswrapper[5014]: I1006 22:54:50.586208 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"]
Oct 06 22:54:50 crc kubenswrapper[5014]: I1006 22:54:50.731485 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bq5pp\" (UniqueName: \"kubernetes.io/projected/59d83c51-141e-4729-93f6-3a476f41b4de-kube-api-access-bq5pp\") pod \"mariadb-client-5-default\" (UID: \"59d83c51-141e-4729-93f6-3a476f41b4de\") " pod="openstack/mariadb-client-5-default"
Oct 06 22:54:50 crc kubenswrapper[5014]: I1006 22:54:50.834305 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bq5pp\" (UniqueName: \"kubernetes.io/projected/59d83c51-141e-4729-93f6-3a476f41b4de-kube-api-access-bq5pp\") pod \"mariadb-client-5-default\" (UID: \"59d83c51-141e-4729-93f6-3a476f41b4de\") " pod="openstack/mariadb-client-5-default"
Oct 06 22:54:50 crc kubenswrapper[5014]: I1006 22:54:50.863489 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bq5pp\" (UniqueName: \"kubernetes.io/projected/59d83c51-141e-4729-93f6-3a476f41b4de-kube-api-access-bq5pp\") pod \"mariadb-client-5-default\" (UID: \"59d83c51-141e-4729-93f6-3a476f41b4de\") " pod="openstack/mariadb-client-5-default"
Oct 06 22:54:50 crc kubenswrapper[5014]: I1006 22:54:50.908745 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default"
Oct 06 22:54:51 crc kubenswrapper[5014]: I1006 22:54:51.524213 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"]
Oct 06 22:54:51 crc kubenswrapper[5014]: I1006 22:54:51.735826 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 22:54:51 crc kubenswrapper[5014]: I1006 22:54:51.735914 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 22:54:51 crc kubenswrapper[5014]: I1006 22:54:51.735984 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths"
Oct 06 22:54:51 crc kubenswrapper[5014]: I1006 22:54:51.736894 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f1b2dba782986714b78842b612ab901075cbfeaf1e1971b541c84d452a74bcc2"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 06 22:54:51 crc kubenswrapper[5014]: I1006 22:54:51.736996 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://f1b2dba782986714b78842b612ab901075cbfeaf1e1971b541c84d452a74bcc2" gracePeriod=600
Oct 06 22:54:51 crc kubenswrapper[5014]: I1006 22:54:51.749017 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"59d83c51-141e-4729-93f6-3a476f41b4de","Type":"ContainerStarted","Data":"c2d6ee7d54ae2a86a309e31bb0310a43667698021ac6ad58cbae7fb40d248d0f"}
Oct 06 22:54:52 crc kubenswrapper[5014]: I1006 22:54:52.764744 5014 generic.go:334] "Generic (PLEG): container finished" podID="59d83c51-141e-4729-93f6-3a476f41b4de" containerID="209526715e7f712200631547f00e59c0f67a1afabd212fd19b7e394add05073d" exitCode=0
Oct 06 22:54:52 crc kubenswrapper[5014]: I1006 22:54:52.764816 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"59d83c51-141e-4729-93f6-3a476f41b4de","Type":"ContainerDied","Data":"209526715e7f712200631547f00e59c0f67a1afabd212fd19b7e394add05073d"}
Oct 06 22:54:52 crc kubenswrapper[5014]: I1006 22:54:52.770906 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="f1b2dba782986714b78842b612ab901075cbfeaf1e1971b541c84d452a74bcc2" exitCode=0
Oct 06 22:54:52 crc kubenswrapper[5014]: I1006 22:54:52.770971 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"f1b2dba782986714b78842b612ab901075cbfeaf1e1971b541c84d452a74bcc2"}
Oct 06 22:54:52 crc kubenswrapper[5014]: I1006 22:54:52.771013 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016"}
Oct 06 22:54:52 crc kubenswrapper[5014]: I1006 22:54:52.771041 5014 scope.go:117] "RemoveContainer" containerID="d51ffd4da07a6c60a5dd7e86932c78820e7e468ee51494fdaa6fababc4b967ae"
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.189697 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default"
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.195470 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bq5pp\" (UniqueName: \"kubernetes.io/projected/59d83c51-141e-4729-93f6-3a476f41b4de-kube-api-access-bq5pp\") pod \"59d83c51-141e-4729-93f6-3a476f41b4de\" (UID: \"59d83c51-141e-4729-93f6-3a476f41b4de\") "
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.202784 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59d83c51-141e-4729-93f6-3a476f41b4de-kube-api-access-bq5pp" (OuterVolumeSpecName: "kube-api-access-bq5pp") pod "59d83c51-141e-4729-93f6-3a476f41b4de" (UID: "59d83c51-141e-4729-93f6-3a476f41b4de"). InnerVolumeSpecName "kube-api-access-bq5pp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.212632 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-5-default_59d83c51-141e-4729-93f6-3a476f41b4de/mariadb-client-5-default/0.log"
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.247470 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-5-default"]
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.253408 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-5-default"]
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.297005 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bq5pp\" (UniqueName: \"kubernetes.io/projected/59d83c51-141e-4729-93f6-3a476f41b4de-kube-api-access-bq5pp\") on node \"crc\" DevicePath \"\""
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.406080 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-6-default"]
Oct 06 22:54:54 crc kubenswrapper[5014]: E1006 22:54:54.406679 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59d83c51-141e-4729-93f6-3a476f41b4de" containerName="mariadb-client-5-default"
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.406710 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="59d83c51-141e-4729-93f6-3a476f41b4de" containerName="mariadb-client-5-default"
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.407069 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="59d83c51-141e-4729-93f6-3a476f41b4de" containerName="mariadb-client-5-default"
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.408157 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default"
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.413229 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"]
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.500564 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pgr9\" (UniqueName: \"kubernetes.io/projected/54e491cb-901a-491d-adcf-94396d5010e8-kube-api-access-4pgr9\") pod \"mariadb-client-6-default\" (UID: \"54e491cb-901a-491d-adcf-94396d5010e8\") " pod="openstack/mariadb-client-6-default"
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.602272 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pgr9\" (UniqueName: \"kubernetes.io/projected/54e491cb-901a-491d-adcf-94396d5010e8-kube-api-access-4pgr9\") pod \"mariadb-client-6-default\" (UID: \"54e491cb-901a-491d-adcf-94396d5010e8\") " pod="openstack/mariadb-client-6-default"
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.627493 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pgr9\" (UniqueName: \"kubernetes.io/projected/54e491cb-901a-491d-adcf-94396d5010e8-kube-api-access-4pgr9\") pod \"mariadb-client-6-default\" (UID: \"54e491cb-901a-491d-adcf-94396d5010e8\") " pod="openstack/mariadb-client-6-default"
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.736501 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default"
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.794675 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2d6ee7d54ae2a86a309e31bb0310a43667698021ac6ad58cbae7fb40d248d0f"
Oct 06 22:54:54 crc kubenswrapper[5014]: I1006 22:54:54.794726 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default"
Oct 06 22:54:55 crc kubenswrapper[5014]: W1006 22:54:55.295221 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54e491cb_901a_491d_adcf_94396d5010e8.slice/crio-4f2abfe784dcbb5d9c89cb765bf0b3bcabfff9771bc37bfb0e904f9456abd70a WatchSource:0}: Error finding container 4f2abfe784dcbb5d9c89cb765bf0b3bcabfff9771bc37bfb0e904f9456abd70a: Status 404 returned error can't find the container with id 4f2abfe784dcbb5d9c89cb765bf0b3bcabfff9771bc37bfb0e904f9456abd70a
Oct 06 22:54:55 crc kubenswrapper[5014]: I1006 22:54:55.296902 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"]
Oct 06 22:54:55 crc kubenswrapper[5014]: I1006 22:54:55.503734 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59d83c51-141e-4729-93f6-3a476f41b4de" path="/var/lib/kubelet/pods/59d83c51-141e-4729-93f6-3a476f41b4de/volumes"
Oct 06 22:54:55 crc kubenswrapper[5014]: I1006 22:54:55.808148 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"54e491cb-901a-491d-adcf-94396d5010e8","Type":"ContainerStarted","Data":"95156c492eca6aa029a21746cfc24747b7de0d0b8aad42dea075ec296343e731"}
Oct 06 22:54:55 crc kubenswrapper[5014]: I1006 22:54:55.808393 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"54e491cb-901a-491d-adcf-94396d5010e8","Type":"ContainerStarted","Data":"4f2abfe784dcbb5d9c89cb765bf0b3bcabfff9771bc37bfb0e904f9456abd70a"}
Oct 06 22:54:55 crc kubenswrapper[5014]: I1006 22:54:55.829335 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client-6-default" podStartSLOduration=1.82931958 podStartE2EDuration="1.82931958s" podCreationTimestamp="2025-10-06 22:54:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:54:55.82643755 +0000 UTC m=+5041.119474314" watchObservedRunningTime="2025-10-06 22:54:55.82931958 +0000 UTC m=+5041.122356324"
Oct 06 22:54:56 crc kubenswrapper[5014]: I1006 22:54:56.823793 5014 generic.go:334] "Generic (PLEG): container finished" podID="54e491cb-901a-491d-adcf-94396d5010e8" containerID="95156c492eca6aa029a21746cfc24747b7de0d0b8aad42dea075ec296343e731" exitCode=0
Oct 06 22:54:56 crc kubenswrapper[5014]: I1006 22:54:56.824093 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"54e491cb-901a-491d-adcf-94396d5010e8","Type":"ContainerDied","Data":"95156c492eca6aa029a21746cfc24747b7de0d0b8aad42dea075ec296343e731"}
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.316642 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-b6tzl"]
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.318480 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b6tzl"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.327119 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b6tzl"]
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.336463 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.387038 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-6-default"]
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.391979 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-6-default"]
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.467023 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4pgr9\" (UniqueName: \"kubernetes.io/projected/54e491cb-901a-491d-adcf-94396d5010e8-kube-api-access-4pgr9\") pod \"54e491cb-901a-491d-adcf-94396d5010e8\" (UID: \"54e491cb-901a-491d-adcf-94396d5010e8\") "
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.467325 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f4fc548-e5d4-4e73-866f-d256190227b1-catalog-content\") pod \"community-operators-b6tzl\" (UID: \"2f4fc548-e5d4-4e73-866f-d256190227b1\") " pod="openshift-marketplace/community-operators-b6tzl"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.467383 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f4fc548-e5d4-4e73-866f-d256190227b1-utilities\") pod \"community-operators-b6tzl\" (UID: \"2f4fc548-e5d4-4e73-866f-d256190227b1\") " pod="openshift-marketplace/community-operators-b6tzl"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.467434 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2d4t\" (UniqueName: \"kubernetes.io/projected/2f4fc548-e5d4-4e73-866f-d256190227b1-kube-api-access-z2d4t\") pod \"community-operators-b6tzl\" (UID: \"2f4fc548-e5d4-4e73-866f-d256190227b1\") " pod="openshift-marketplace/community-operators-b6tzl"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.472561 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54e491cb-901a-491d-adcf-94396d5010e8-kube-api-access-4pgr9" (OuterVolumeSpecName: "kube-api-access-4pgr9") pod "54e491cb-901a-491d-adcf-94396d5010e8" (UID: "54e491cb-901a-491d-adcf-94396d5010e8"). InnerVolumeSpecName "kube-api-access-4pgr9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.569045 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f4fc548-e5d4-4e73-866f-d256190227b1-catalog-content\") pod \"community-operators-b6tzl\" (UID: \"2f4fc548-e5d4-4e73-866f-d256190227b1\") " pod="openshift-marketplace/community-operators-b6tzl"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.569449 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f4fc548-e5d4-4e73-866f-d256190227b1-utilities\") pod \"community-operators-b6tzl\" (UID: \"2f4fc548-e5d4-4e73-866f-d256190227b1\") " pod="openshift-marketplace/community-operators-b6tzl"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.569494 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2d4t\" (UniqueName: \"kubernetes.io/projected/2f4fc548-e5d4-4e73-866f-d256190227b1-kube-api-access-z2d4t\") pod \"community-operators-b6tzl\" (UID: \"2f4fc548-e5d4-4e73-866f-d256190227b1\") " pod="openshift-marketplace/community-operators-b6tzl"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.569567 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4pgr9\" (UniqueName: \"kubernetes.io/projected/54e491cb-901a-491d-adcf-94396d5010e8-kube-api-access-4pgr9\") on node \"crc\" DevicePath \"\""
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.569688 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f4fc548-e5d4-4e73-866f-d256190227b1-catalog-content\") pod \"community-operators-b6tzl\" (UID: \"2f4fc548-e5d4-4e73-866f-d256190227b1\") " pod="openshift-marketplace/community-operators-b6tzl"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.570053 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f4fc548-e5d4-4e73-866f-d256190227b1-utilities\") pod \"community-operators-b6tzl\" (UID: \"2f4fc548-e5d4-4e73-866f-d256190227b1\") " pod="openshift-marketplace/community-operators-b6tzl"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.573388 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-7-default"]
Oct 06 22:54:58 crc kubenswrapper[5014]: E1006 22:54:58.573918 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54e491cb-901a-491d-adcf-94396d5010e8" containerName="mariadb-client-6-default"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.573948 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="54e491cb-901a-491d-adcf-94396d5010e8" containerName="mariadb-client-6-default"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.574180 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="54e491cb-901a-491d-adcf-94396d5010e8" containerName="mariadb-client-6-default"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.574913 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.587354 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"]
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.592752 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2d4t\" (UniqueName: \"kubernetes.io/projected/2f4fc548-e5d4-4e73-866f-d256190227b1-kube-api-access-z2d4t\") pod \"community-operators-b6tzl\" (UID: \"2f4fc548-e5d4-4e73-866f-d256190227b1\") " pod="openshift-marketplace/community-operators-b6tzl"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.669040 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b6tzl"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.772580 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pk4x\" (UniqueName: \"kubernetes.io/projected/b4fc3e72-ebe0-4c29-982a-a5a01b22013a-kube-api-access-8pk4x\") pod \"mariadb-client-7-default\" (UID: \"b4fc3e72-ebe0-4c29-982a-a5a01b22013a\") " pod="openstack/mariadb-client-7-default"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.852163 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4f2abfe784dcbb5d9c89cb765bf0b3bcabfff9771bc37bfb0e904f9456abd70a"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.852224 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.873631 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pk4x\" (UniqueName: \"kubernetes.io/projected/b4fc3e72-ebe0-4c29-982a-a5a01b22013a-kube-api-access-8pk4x\") pod \"mariadb-client-7-default\" (UID: \"b4fc3e72-ebe0-4c29-982a-a5a01b22013a\") " pod="openstack/mariadb-client-7-default"
Oct 06 22:54:58 crc kubenswrapper[5014]: I1006 22:54:58.895543 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pk4x\" (UniqueName: \"kubernetes.io/projected/b4fc3e72-ebe0-4c29-982a-a5a01b22013a-kube-api-access-8pk4x\") pod \"mariadb-client-7-default\" (UID: \"b4fc3e72-ebe0-4c29-982a-a5a01b22013a\") " pod="openstack/mariadb-client-7-default"
Oct 06 22:54:59 crc kubenswrapper[5014]: I1006 22:54:59.165569 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b6tzl"]
Oct 06 22:54:59 crc kubenswrapper[5014]: W1006 22:54:59.170820 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2f4fc548_e5d4_4e73_866f_d256190227b1.slice/crio-d88ec8513648b1283f5585a926ba866a004a41e463eb6e5f70cea743173541c0 WatchSource:0}: Error finding container d88ec8513648b1283f5585a926ba866a004a41e463eb6e5f70cea743173541c0: Status 404 returned error can't find the container with id d88ec8513648b1283f5585a926ba866a004a41e463eb6e5f70cea743173541c0
Oct 06 22:54:59 crc kubenswrapper[5014]: I1006 22:54:59.195588 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default"
Oct 06 22:54:59 crc kubenswrapper[5014]: I1006 22:54:59.496472 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54e491cb-901a-491d-adcf-94396d5010e8" path="/var/lib/kubelet/pods/54e491cb-901a-491d-adcf-94396d5010e8/volumes"
Oct 06 22:54:59 crc kubenswrapper[5014]: I1006 22:54:59.579725 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"]
Oct 06 22:54:59 crc kubenswrapper[5014]: W1006 22:54:59.582490 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4fc3e72_ebe0_4c29_982a_a5a01b22013a.slice/crio-021ae20839c39804e95bc364dd3a43403df9d4470664198bbbfd46bd8b229fde WatchSource:0}: Error finding container 021ae20839c39804e95bc364dd3a43403df9d4470664198bbbfd46bd8b229fde: Status 404 returned error can't find the container with id 021ae20839c39804e95bc364dd3a43403df9d4470664198bbbfd46bd8b229fde
Oct 06 22:54:59 crc kubenswrapper[5014]: I1006 22:54:59.862890 5014 generic.go:334] "Generic (PLEG): container finished" podID="b4fc3e72-ebe0-4c29-982a-a5a01b22013a" containerID="86fbb7e11cf774f078adf6b67c2834062bf8117ccbd4af07c5fa78e4ffee8b6b" exitCode=0
Oct 06 22:54:59 crc kubenswrapper[5014]: I1006 22:54:59.863008 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"b4fc3e72-ebe0-4c29-982a-a5a01b22013a","Type":"ContainerDied","Data":"86fbb7e11cf774f078adf6b67c2834062bf8117ccbd4af07c5fa78e4ffee8b6b"}
Oct 06 22:54:59 crc kubenswrapper[5014]: I1006 22:54:59.863051 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"b4fc3e72-ebe0-4c29-982a-a5a01b22013a","Type":"ContainerStarted","Data":"021ae20839c39804e95bc364dd3a43403df9d4470664198bbbfd46bd8b229fde"}
Oct 06 22:54:59 crc kubenswrapper[5014]: I1006 22:54:59.865378 5014 generic.go:334] "Generic (PLEG): container finished" podID="2f4fc548-e5d4-4e73-866f-d256190227b1" containerID="15eced8f2a77e53b3b21bddd61ef27f5a79e7ef64e61e794ae608163d20654ed" exitCode=0
Oct 06 22:54:59 crc kubenswrapper[5014]: I1006 22:54:59.865426 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b6tzl" event={"ID":"2f4fc548-e5d4-4e73-866f-d256190227b1","Type":"ContainerDied","Data":"15eced8f2a77e53b3b21bddd61ef27f5a79e7ef64e61e794ae608163d20654ed"}
Oct 06 22:54:59 crc kubenswrapper[5014]: I1006 22:54:59.865486 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b6tzl" event={"ID":"2f4fc548-e5d4-4e73-866f-d256190227b1","Type":"ContainerStarted","Data":"d88ec8513648b1283f5585a926ba866a004a41e463eb6e5f70cea743173541c0"}
Oct 06 22:55:00 crc kubenswrapper[5014]: I1006 22:55:00.879535 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b6tzl" event={"ID":"2f4fc548-e5d4-4e73-866f-d256190227b1","Type":"ContainerStarted","Data":"bd0b8dc3ad8f3e913bac9e2f2a58b5b8f5fdf8c6fb8e6dd8a7485ace446a6f7c"}
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.398680 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default"
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.421756 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-7-default_b4fc3e72-ebe0-4c29-982a-a5a01b22013a/mariadb-client-7-default/0.log"
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.448153 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-7-default"]
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.456358 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-7-default"]
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.543708 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pk4x\" (UniqueName: \"kubernetes.io/projected/b4fc3e72-ebe0-4c29-982a-a5a01b22013a-kube-api-access-8pk4x\") pod \"b4fc3e72-ebe0-4c29-982a-a5a01b22013a\" (UID: \"b4fc3e72-ebe0-4c29-982a-a5a01b22013a\") "
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.551849 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4fc3e72-ebe0-4c29-982a-a5a01b22013a-kube-api-access-8pk4x" (OuterVolumeSpecName: "kube-api-access-8pk4x") pod "b4fc3e72-ebe0-4c29-982a-a5a01b22013a" (UID: "b4fc3e72-ebe0-4c29-982a-a5a01b22013a"). InnerVolumeSpecName "kube-api-access-8pk4x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.645310 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2"]
Oct 06 22:55:01 crc kubenswrapper[5014]: E1006 22:55:01.646080 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4fc3e72-ebe0-4c29-982a-a5a01b22013a" containerName="mariadb-client-7-default"
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.646328 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4fc3e72-ebe0-4c29-982a-a5a01b22013a" containerName="mariadb-client-7-default"
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.646847 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4fc3e72-ebe0-4c29-982a-a5a01b22013a" containerName="mariadb-client-7-default"
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.647233 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pk4x\" (UniqueName: \"kubernetes.io/projected/b4fc3e72-ebe0-4c29-982a-a5a01b22013a-kube-api-access-8pk4x\") on node \"crc\" DevicePath \"\""
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.647715 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2"
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.663515 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"]
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.748921 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9cfw\" (UniqueName: \"kubernetes.io/projected/bd2fa05e-ed1f-4444-9a99-e46f24a6a023-kube-api-access-g9cfw\") pod \"mariadb-client-2\" (UID: \"bd2fa05e-ed1f-4444-9a99-e46f24a6a023\") " pod="openstack/mariadb-client-2"
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.850583 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9cfw\" (UniqueName: \"kubernetes.io/projected/bd2fa05e-ed1f-4444-9a99-e46f24a6a023-kube-api-access-g9cfw\") pod \"mariadb-client-2\" (UID: \"bd2fa05e-ed1f-4444-9a99-e46f24a6a023\") " pod="openstack/mariadb-client-2"
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.881741 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9cfw\" (UniqueName: \"kubernetes.io/projected/bd2fa05e-ed1f-4444-9a99-e46f24a6a023-kube-api-access-g9cfw\") pod \"mariadb-client-2\" (UID: \"bd2fa05e-ed1f-4444-9a99-e46f24a6a023\") " pod="openstack/mariadb-client-2"
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.894182 5014 scope.go:117] "RemoveContainer" containerID="86fbb7e11cf774f078adf6b67c2834062bf8117ccbd4af07c5fa78e4ffee8b6b"
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.894199 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default"
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.898517 5014 generic.go:334] "Generic (PLEG): container finished" podID="2f4fc548-e5d4-4e73-866f-d256190227b1" containerID="bd0b8dc3ad8f3e913bac9e2f2a58b5b8f5fdf8c6fb8e6dd8a7485ace446a6f7c" exitCode=0
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.898590 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b6tzl" event={"ID":"2f4fc548-e5d4-4e73-866f-d256190227b1","Type":"ContainerDied","Data":"bd0b8dc3ad8f3e913bac9e2f2a58b5b8f5fdf8c6fb8e6dd8a7485ace446a6f7c"}
Oct 06 22:55:01 crc kubenswrapper[5014]: I1006 22:55:01.974578 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2"
Oct 06 22:55:02 crc kubenswrapper[5014]: I1006 22:55:02.589024 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"]
Oct 06 22:55:02 crc kubenswrapper[5014]: W1006 22:55:02.914044 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd2fa05e_ed1f_4444_9a99_e46f24a6a023.slice/crio-3e8eb8ae745aee04d0bd65cff27797ec48a1544860cb1f4bc96fcf8e6395135c WatchSource:0}: Error finding container 3e8eb8ae745aee04d0bd65cff27797ec48a1544860cb1f4bc96fcf8e6395135c: Status 404 returned error can't find the container with id 3e8eb8ae745aee04d0bd65cff27797ec48a1544860cb1f4bc96fcf8e6395135c
Oct 06 22:55:03 crc kubenswrapper[5014]: I1006 22:55:03.506754 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4fc3e72-ebe0-4c29-982a-a5a01b22013a" path="/var/lib/kubelet/pods/b4fc3e72-ebe0-4c29-982a-a5a01b22013a/volumes"
Oct 06 22:55:03 crc kubenswrapper[5014]: I1006 22:55:03.934873 5014 generic.go:334] "Generic (PLEG): container finished" podID="bd2fa05e-ed1f-4444-9a99-e46f24a6a023" containerID="af4106729a78544128dadec8d86d2d5546afd1b211c4bb828e918d98cd1157cf" exitCode=0
Oct 06 22:55:03 crc kubenswrapper[5014]: I1006 22:55:03.934944 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"bd2fa05e-ed1f-4444-9a99-e46f24a6a023","Type":"ContainerDied","Data":"af4106729a78544128dadec8d86d2d5546afd1b211c4bb828e918d98cd1157cf"}
Oct 06 22:55:03 crc kubenswrapper[5014]: I1006 22:55:03.934973 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"bd2fa05e-ed1f-4444-9a99-e46f24a6a023","Type":"ContainerStarted","Data":"3e8eb8ae745aee04d0bd65cff27797ec48a1544860cb1f4bc96fcf8e6395135c"}
Oct 06 22:55:03 crc kubenswrapper[5014]: I1006 22:55:03.944440 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b6tzl" event={"ID":"2f4fc548-e5d4-4e73-866f-d256190227b1","Type":"ContainerStarted","Data":"b62b49912175a742998a626678f6cc427a5c31727a853fe61c612372274eef59"}
Oct 06 22:55:03 crc kubenswrapper[5014]: I1006 22:55:03.991614 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-b6tzl" podStartSLOduration=3.525322749 podStartE2EDuration="5.99158366s" podCreationTimestamp="2025-10-06 22:54:58 +0000 UTC" firstStartedPulling="2025-10-06 22:54:59.867122639 +0000 UTC m=+5045.160159413" lastFinishedPulling="2025-10-06 22:55:02.33338359 +0000 UTC m=+5047.626420324" observedRunningTime="2025-10-06 22:55:03.974045821 +0000 UTC m=+5049.267082565" watchObservedRunningTime="2025-10-06 22:55:03.99158366 +0000 UTC m=+5049.284620434"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.458696 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.483528 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2_bd2fa05e-ed1f-4444-9a99-e46f24a6a023/mariadb-client-2/0.log"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.521672 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2"]
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.531422 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2"]
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.614186 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9cfw\" (UniqueName: \"kubernetes.io/projected/bd2fa05e-ed1f-4444-9a99-e46f24a6a023-kube-api-access-g9cfw\") pod \"bd2fa05e-ed1f-4444-9a99-e46f24a6a023\" (UID: \"bd2fa05e-ed1f-4444-9a99-e46f24a6a023\") "
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.622807 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd2fa05e-ed1f-4444-9a99-e46f24a6a023-kube-api-access-g9cfw" (OuterVolumeSpecName: "kube-api-access-g9cfw") pod "bd2fa05e-ed1f-4444-9a99-e46f24a6a023" (UID: "bd2fa05e-ed1f-4444-9a99-e46f24a6a023"). InnerVolumeSpecName "kube-api-access-g9cfw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.685973 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-sqg9p"]
Oct 06 22:55:05 crc kubenswrapper[5014]: E1006 22:55:05.686265 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd2fa05e-ed1f-4444-9a99-e46f24a6a023" containerName="mariadb-client-2"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.686281 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd2fa05e-ed1f-4444-9a99-e46f24a6a023" containerName="mariadb-client-2"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.686467 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd2fa05e-ed1f-4444-9a99-e46f24a6a023" containerName="mariadb-client-2"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.687726 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sqg9p"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.706239 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sqg9p"]
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.716523 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9cfw\" (UniqueName: \"kubernetes.io/projected/bd2fa05e-ed1f-4444-9a99-e46f24a6a023-kube-api-access-g9cfw\") on node \"crc\" DevicePath \"\""
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.817950 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ff4vr\" (UniqueName: \"kubernetes.io/projected/43e13ffe-b063-4e95-b84c-eb90e44162d9-kube-api-access-ff4vr\") pod \"redhat-marketplace-sqg9p\" (UID: \"43e13ffe-b063-4e95-b84c-eb90e44162d9\") " pod="openshift-marketplace/redhat-marketplace-sqg9p"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.818105 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43e13ffe-b063-4e95-b84c-eb90e44162d9-catalog-content\") pod \"redhat-marketplace-sqg9p\" (UID: \"43e13ffe-b063-4e95-b84c-eb90e44162d9\") " pod="openshift-marketplace/redhat-marketplace-sqg9p"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.818128 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43e13ffe-b063-4e95-b84c-eb90e44162d9-utilities\") pod \"redhat-marketplace-sqg9p\" (UID: \"43e13ffe-b063-4e95-b84c-eb90e44162d9\") " pod="openshift-marketplace/redhat-marketplace-sqg9p"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.919946 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43e13ffe-b063-4e95-b84c-eb90e44162d9-catalog-content\") pod \"redhat-marketplace-sqg9p\" (UID: \"43e13ffe-b063-4e95-b84c-eb90e44162d9\") " pod="openshift-marketplace/redhat-marketplace-sqg9p"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.920015 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43e13ffe-b063-4e95-b84c-eb90e44162d9-utilities\") pod \"redhat-marketplace-sqg9p\" (UID: \"43e13ffe-b063-4e95-b84c-eb90e44162d9\") " pod="openshift-marketplace/redhat-marketplace-sqg9p"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.920096 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ff4vr\" (UniqueName: \"kubernetes.io/projected/43e13ffe-b063-4e95-b84c-eb90e44162d9-kube-api-access-ff4vr\") pod \"redhat-marketplace-sqg9p\" (UID: \"43e13ffe-b063-4e95-b84c-eb90e44162d9\") " pod="openshift-marketplace/redhat-marketplace-sqg9p"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.920436 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43e13ffe-b063-4e95-b84c-eb90e44162d9-catalog-content\") pod \"redhat-marketplace-sqg9p\" (UID: \"43e13ffe-b063-4e95-b84c-eb90e44162d9\") " pod="openshift-marketplace/redhat-marketplace-sqg9p"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.920568 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43e13ffe-b063-4e95-b84c-eb90e44162d9-utilities\") pod \"redhat-marketplace-sqg9p\" (UID: \"43e13ffe-b063-4e95-b84c-eb90e44162d9\") " pod="openshift-marketplace/redhat-marketplace-sqg9p"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.942816 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ff4vr\" (UniqueName: \"kubernetes.io/projected/43e13ffe-b063-4e95-b84c-eb90e44162d9-kube-api-access-ff4vr\") pod \"redhat-marketplace-sqg9p\" (UID: \"43e13ffe-b063-4e95-b84c-eb90e44162d9\") " pod="openshift-marketplace/redhat-marketplace-sqg9p"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.968675 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e8eb8ae745aee04d0bd65cff27797ec48a1544860cb1f4bc96fcf8e6395135c"
Oct 06 22:55:05 crc kubenswrapper[5014]: I1006 22:55:05.968744 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2"
Oct 06 22:55:06 crc kubenswrapper[5014]: I1006 22:55:06.015140 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sqg9p"
Oct 06 22:55:06 crc kubenswrapper[5014]: I1006 22:55:06.237376 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sqg9p"]
Oct 06 22:55:06 crc kubenswrapper[5014]: W1006 22:55:06.242039 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod43e13ffe_b063_4e95_b84c_eb90e44162d9.slice/crio-197132639e19901701d3503f534abbebb68fb28e130ae13189814115cd5c59b1 WatchSource:0}: Error finding container 197132639e19901701d3503f534abbebb68fb28e130ae13189814115cd5c59b1: Status 404 returned error can't find the container with id 197132639e19901701d3503f534abbebb68fb28e130ae13189814115cd5c59b1
Oct 06 22:55:06 crc kubenswrapper[5014]: I1006 22:55:06.979822 5014 generic.go:334] "Generic (PLEG): container finished" podID="43e13ffe-b063-4e95-b84c-eb90e44162d9" containerID="a49bc486fa34dcb585145594a4a566dac3601947518bb30e50f5d91d0903f5d9" exitCode=0
Oct 06 22:55:06 crc kubenswrapper[5014]: I1006 22:55:06.980308 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sqg9p" event={"ID":"43e13ffe-b063-4e95-b84c-eb90e44162d9","Type":"ContainerDied","Data":"a49bc486fa34dcb585145594a4a566dac3601947518bb30e50f5d91d0903f5d9"}
Oct 06 22:55:06 crc kubenswrapper[5014]: I1006 22:55:06.981574 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sqg9p" event={"ID":"43e13ffe-b063-4e95-b84c-eb90e44162d9","Type":"ContainerStarted","Data":"197132639e19901701d3503f534abbebb68fb28e130ae13189814115cd5c59b1"}
Oct 06 22:55:07 crc kubenswrapper[5014]: I1006 22:55:07.504955 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd2fa05e-ed1f-4444-9a99-e46f24a6a023" path="/var/lib/kubelet/pods/bd2fa05e-ed1f-4444-9a99-e46f24a6a023/volumes"
Oct 06 22:55:07 crc kubenswrapper[5014]: I1006 22:55:07.995536 5014 generic.go:334] "Generic (PLEG): container finished" podID="43e13ffe-b063-4e95-b84c-eb90e44162d9" containerID="4d3638760149cc4ff9b1b6fb1fb7c92e152849f6bbd679b1dd46a4aa4dff9a53" exitCode=0
Oct 06 22:55:07 crc kubenswrapper[5014]: I1006 22:55:07.995600 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sqg9p"
event={"ID":"43e13ffe-b063-4e95-b84c-eb90e44162d9","Type":"ContainerDied","Data":"4d3638760149cc4ff9b1b6fb1fb7c92e152849f6bbd679b1dd46a4aa4dff9a53"} Oct 06 22:55:08 crc kubenswrapper[5014]: I1006 22:55:08.670544 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-b6tzl" Oct 06 22:55:08 crc kubenswrapper[5014]: I1006 22:55:08.670914 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-b6tzl" Oct 06 22:55:08 crc kubenswrapper[5014]: I1006 22:55:08.740919 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-b6tzl" Oct 06 22:55:09 crc kubenswrapper[5014]: I1006 22:55:09.015727 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sqg9p" event={"ID":"43e13ffe-b063-4e95-b84c-eb90e44162d9","Type":"ContainerStarted","Data":"cfbc0cec80b3d3650a62bb261e595146678db246affbe5f81a1f3e8c8642c0f4"} Oct 06 22:55:09 crc kubenswrapper[5014]: I1006 22:55:09.056062 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-sqg9p" podStartSLOduration=2.466894393 podStartE2EDuration="4.056045742s" podCreationTimestamp="2025-10-06 22:55:05 +0000 UTC" firstStartedPulling="2025-10-06 22:55:06.982789711 +0000 UTC m=+5052.275826485" lastFinishedPulling="2025-10-06 22:55:08.57194106 +0000 UTC m=+5053.864977834" observedRunningTime="2025-10-06 22:55:09.047577337 +0000 UTC m=+5054.340614081" watchObservedRunningTime="2025-10-06 22:55:09.056045742 +0000 UTC m=+5054.349082476" Oct 06 22:55:09 crc kubenswrapper[5014]: I1006 22:55:09.075796 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-b6tzl" Oct 06 22:55:11 crc kubenswrapper[5014]: I1006 22:55:11.083102 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b6tzl"] Oct 06 22:55:11 crc kubenswrapper[5014]: I1006 22:55:11.083956 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-b6tzl" podUID="2f4fc548-e5d4-4e73-866f-d256190227b1" containerName="registry-server" containerID="cri-o://b62b49912175a742998a626678f6cc427a5c31727a853fe61c612372274eef59" gracePeriod=2 Oct 06 22:55:12 crc kubenswrapper[5014]: I1006 22:55:12.044653 5014 generic.go:334] "Generic (PLEG): container finished" podID="2f4fc548-e5d4-4e73-866f-d256190227b1" containerID="b62b49912175a742998a626678f6cc427a5c31727a853fe61c612372274eef59" exitCode=0 Oct 06 22:55:12 crc kubenswrapper[5014]: I1006 22:55:12.044845 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b6tzl" event={"ID":"2f4fc548-e5d4-4e73-866f-d256190227b1","Type":"ContainerDied","Data":"b62b49912175a742998a626678f6cc427a5c31727a853fe61c612372274eef59"} Oct 06 22:55:12 crc kubenswrapper[5014]: I1006 22:55:12.119488 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b6tzl" Oct 06 22:55:12 crc kubenswrapper[5014]: I1006 22:55:12.181879 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2d4t\" (UniqueName: \"kubernetes.io/projected/2f4fc548-e5d4-4e73-866f-d256190227b1-kube-api-access-z2d4t\") pod \"2f4fc548-e5d4-4e73-866f-d256190227b1\" (UID: \"2f4fc548-e5d4-4e73-866f-d256190227b1\") " Oct 06 22:55:12 crc kubenswrapper[5014]: I1006 22:55:12.181991 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f4fc548-e5d4-4e73-866f-d256190227b1-catalog-content\") pod \"2f4fc548-e5d4-4e73-866f-d256190227b1\" (UID: \"2f4fc548-e5d4-4e73-866f-d256190227b1\") " Oct 06 22:55:12 crc kubenswrapper[5014]: I1006 22:55:12.188962 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f4fc548-e5d4-4e73-866f-d256190227b1-kube-api-access-z2d4t" (OuterVolumeSpecName: "kube-api-access-z2d4t") pod "2f4fc548-e5d4-4e73-866f-d256190227b1" (UID: "2f4fc548-e5d4-4e73-866f-d256190227b1"). InnerVolumeSpecName "kube-api-access-z2d4t". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:55:12 crc kubenswrapper[5014]: I1006 22:55:12.231064 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f4fc548-e5d4-4e73-866f-d256190227b1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2f4fc548-e5d4-4e73-866f-d256190227b1" (UID: "2f4fc548-e5d4-4e73-866f-d256190227b1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:55:12 crc kubenswrapper[5014]: I1006 22:55:12.282765 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f4fc548-e5d4-4e73-866f-d256190227b1-utilities\") pod \"2f4fc548-e5d4-4e73-866f-d256190227b1\" (UID: \"2f4fc548-e5d4-4e73-866f-d256190227b1\") " Oct 06 22:55:12 crc kubenswrapper[5014]: I1006 22:55:12.283163 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f4fc548-e5d4-4e73-866f-d256190227b1-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 22:55:12 crc kubenswrapper[5014]: I1006 22:55:12.283179 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2d4t\" (UniqueName: \"kubernetes.io/projected/2f4fc548-e5d4-4e73-866f-d256190227b1-kube-api-access-z2d4t\") on node \"crc\" DevicePath \"\"" Oct 06 22:55:12 crc kubenswrapper[5014]: I1006 22:55:12.283698 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f4fc548-e5d4-4e73-866f-d256190227b1-utilities" (OuterVolumeSpecName: "utilities") pod "2f4fc548-e5d4-4e73-866f-d256190227b1" (UID: "2f4fc548-e5d4-4e73-866f-d256190227b1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:55:12 crc kubenswrapper[5014]: I1006 22:55:12.385242 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f4fc548-e5d4-4e73-866f-d256190227b1-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 22:55:13 crc kubenswrapper[5014]: I1006 22:55:13.058562 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b6tzl" event={"ID":"2f4fc548-e5d4-4e73-866f-d256190227b1","Type":"ContainerDied","Data":"d88ec8513648b1283f5585a926ba866a004a41e463eb6e5f70cea743173541c0"} Oct 06 22:55:13 crc kubenswrapper[5014]: I1006 22:55:13.058670 5014 scope.go:117] "RemoveContainer" containerID="b62b49912175a742998a626678f6cc427a5c31727a853fe61c612372274eef59" Oct 06 22:55:13 crc kubenswrapper[5014]: I1006 22:55:13.058703 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b6tzl" Oct 06 22:55:13 crc kubenswrapper[5014]: I1006 22:55:13.098909 5014 scope.go:117] "RemoveContainer" containerID="bd0b8dc3ad8f3e913bac9e2f2a58b5b8f5fdf8c6fb8e6dd8a7485ace446a6f7c" Oct 06 22:55:13 crc kubenswrapper[5014]: I1006 22:55:13.110790 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b6tzl"] Oct 06 22:55:13 crc kubenswrapper[5014]: I1006 22:55:13.126269 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-b6tzl"] Oct 06 22:55:13 crc kubenswrapper[5014]: I1006 22:55:13.147601 5014 scope.go:117] "RemoveContainer" containerID="15eced8f2a77e53b3b21bddd61ef27f5a79e7ef64e61e794ae608163d20654ed" Oct 06 22:55:13 crc kubenswrapper[5014]: I1006 22:55:13.504167 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f4fc548-e5d4-4e73-866f-d256190227b1" path="/var/lib/kubelet/pods/2f4fc548-e5d4-4e73-866f-d256190227b1/volumes" Oct 06 22:55:16 crc kubenswrapper[5014]: I1006 22:55:16.015327 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-sqg9p" Oct 06 22:55:16 crc kubenswrapper[5014]: I1006 22:55:16.015688 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-sqg9p" Oct 06 22:55:16 crc kubenswrapper[5014]: I1006 22:55:16.095652 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-sqg9p" Oct 06 22:55:16 crc kubenswrapper[5014]: I1006 22:55:16.153701 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-sqg9p" Oct 06 22:55:17 crc kubenswrapper[5014]: I1006 22:55:17.081302 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sqg9p"] Oct 06 22:55:18 crc kubenswrapper[5014]: I1006 22:55:18.114724 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-sqg9p" podUID="43e13ffe-b063-4e95-b84c-eb90e44162d9" containerName="registry-server" containerID="cri-o://cfbc0cec80b3d3650a62bb261e595146678db246affbe5f81a1f3e8c8642c0f4" gracePeriod=2 Oct 06 22:55:18 crc kubenswrapper[5014]: I1006 22:55:18.795952 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sqg9p" Oct 06 22:55:18 crc kubenswrapper[5014]: I1006 22:55:18.910028 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43e13ffe-b063-4e95-b84c-eb90e44162d9-catalog-content\") pod \"43e13ffe-b063-4e95-b84c-eb90e44162d9\" (UID: \"43e13ffe-b063-4e95-b84c-eb90e44162d9\") " Oct 06 22:55:18 crc kubenswrapper[5014]: I1006 22:55:18.910100 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ff4vr\" (UniqueName: \"kubernetes.io/projected/43e13ffe-b063-4e95-b84c-eb90e44162d9-kube-api-access-ff4vr\") pod \"43e13ffe-b063-4e95-b84c-eb90e44162d9\" (UID: \"43e13ffe-b063-4e95-b84c-eb90e44162d9\") " Oct 06 22:55:18 crc kubenswrapper[5014]: I1006 22:55:18.911317 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43e13ffe-b063-4e95-b84c-eb90e44162d9-utilities\") pod \"43e13ffe-b063-4e95-b84c-eb90e44162d9\" (UID: \"43e13ffe-b063-4e95-b84c-eb90e44162d9\") " Oct 06 22:55:18 crc kubenswrapper[5014]: I1006 22:55:18.912834 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43e13ffe-b063-4e95-b84c-eb90e44162d9-utilities" (OuterVolumeSpecName: "utilities") pod "43e13ffe-b063-4e95-b84c-eb90e44162d9" (UID: "43e13ffe-b063-4e95-b84c-eb90e44162d9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:55:18 crc kubenswrapper[5014]: I1006 22:55:18.916965 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43e13ffe-b063-4e95-b84c-eb90e44162d9-kube-api-access-ff4vr" (OuterVolumeSpecName: "kube-api-access-ff4vr") pod "43e13ffe-b063-4e95-b84c-eb90e44162d9" (UID: "43e13ffe-b063-4e95-b84c-eb90e44162d9"). InnerVolumeSpecName "kube-api-access-ff4vr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:55:18 crc kubenswrapper[5014]: I1006 22:55:18.928294 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43e13ffe-b063-4e95-b84c-eb90e44162d9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "43e13ffe-b063-4e95-b84c-eb90e44162d9" (UID: "43e13ffe-b063-4e95-b84c-eb90e44162d9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.013283 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/43e13ffe-b063-4e95-b84c-eb90e44162d9-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.013321 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/43e13ffe-b063-4e95-b84c-eb90e44162d9-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.013339 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ff4vr\" (UniqueName: \"kubernetes.io/projected/43e13ffe-b063-4e95-b84c-eb90e44162d9-kube-api-access-ff4vr\") on node \"crc\" DevicePath \"\"" Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.126562 5014 generic.go:334] "Generic (PLEG): container finished" podID="43e13ffe-b063-4e95-b84c-eb90e44162d9" containerID="cfbc0cec80b3d3650a62bb261e595146678db246affbe5f81a1f3e8c8642c0f4" exitCode=0 Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.126654 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sqg9p" event={"ID":"43e13ffe-b063-4e95-b84c-eb90e44162d9","Type":"ContainerDied","Data":"cfbc0cec80b3d3650a62bb261e595146678db246affbe5f81a1f3e8c8642c0f4"} Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.126688 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sqg9p" Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.126706 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sqg9p" event={"ID":"43e13ffe-b063-4e95-b84c-eb90e44162d9","Type":"ContainerDied","Data":"197132639e19901701d3503f534abbebb68fb28e130ae13189814115cd5c59b1"} Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.126736 5014 scope.go:117] "RemoveContainer" containerID="cfbc0cec80b3d3650a62bb261e595146678db246affbe5f81a1f3e8c8642c0f4" Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.160208 5014 scope.go:117] "RemoveContainer" containerID="4d3638760149cc4ff9b1b6fb1fb7c92e152849f6bbd679b1dd46a4aa4dff9a53" Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.179840 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sqg9p"] Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.189935 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-sqg9p"] Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.197565 5014 scope.go:117] "RemoveContainer" containerID="a49bc486fa34dcb585145594a4a566dac3601947518bb30e50f5d91d0903f5d9" Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.237843 5014 scope.go:117] "RemoveContainer" containerID="cfbc0cec80b3d3650a62bb261e595146678db246affbe5f81a1f3e8c8642c0f4" Oct 06 22:55:19 crc kubenswrapper[5014]: E1006 22:55:19.238649 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cfbc0cec80b3d3650a62bb261e595146678db246affbe5f81a1f3e8c8642c0f4\": container with ID starting with cfbc0cec80b3d3650a62bb261e595146678db246affbe5f81a1f3e8c8642c0f4 not found: ID does not exist" containerID="cfbc0cec80b3d3650a62bb261e595146678db246affbe5f81a1f3e8c8642c0f4" Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.238704 5014 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfbc0cec80b3d3650a62bb261e595146678db246affbe5f81a1f3e8c8642c0f4"} err="failed to get container status \"cfbc0cec80b3d3650a62bb261e595146678db246affbe5f81a1f3e8c8642c0f4\": rpc error: code = NotFound desc = could not find container \"cfbc0cec80b3d3650a62bb261e595146678db246affbe5f81a1f3e8c8642c0f4\": container with ID starting with cfbc0cec80b3d3650a62bb261e595146678db246affbe5f81a1f3e8c8642c0f4 not found: ID does not exist" Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.238741 5014 scope.go:117] "RemoveContainer" containerID="4d3638760149cc4ff9b1b6fb1fb7c92e152849f6bbd679b1dd46a4aa4dff9a53" Oct 06 22:55:19 crc kubenswrapper[5014]: E1006 22:55:19.239294 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d3638760149cc4ff9b1b6fb1fb7c92e152849f6bbd679b1dd46a4aa4dff9a53\": container with ID starting with 4d3638760149cc4ff9b1b6fb1fb7c92e152849f6bbd679b1dd46a4aa4dff9a53 not found: ID does not exist" containerID="4d3638760149cc4ff9b1b6fb1fb7c92e152849f6bbd679b1dd46a4aa4dff9a53" Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.239543 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d3638760149cc4ff9b1b6fb1fb7c92e152849f6bbd679b1dd46a4aa4dff9a53"} err="failed to get container status \"4d3638760149cc4ff9b1b6fb1fb7c92e152849f6bbd679b1dd46a4aa4dff9a53\": rpc error: code = NotFound desc = could not find container \"4d3638760149cc4ff9b1b6fb1fb7c92e152849f6bbd679b1dd46a4aa4dff9a53\": container with ID starting with 4d3638760149cc4ff9b1b6fb1fb7c92e152849f6bbd679b1dd46a4aa4dff9a53 not found: ID does not exist" Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.239587 5014 scope.go:117] "RemoveContainer" containerID="a49bc486fa34dcb585145594a4a566dac3601947518bb30e50f5d91d0903f5d9" Oct 06 22:55:19 crc kubenswrapper[5014]: E1006 22:55:19.240254 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a49bc486fa34dcb585145594a4a566dac3601947518bb30e50f5d91d0903f5d9\": container with ID starting with a49bc486fa34dcb585145594a4a566dac3601947518bb30e50f5d91d0903f5d9 not found: ID does not exist" containerID="a49bc486fa34dcb585145594a4a566dac3601947518bb30e50f5d91d0903f5d9" Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.240302 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a49bc486fa34dcb585145594a4a566dac3601947518bb30e50f5d91d0903f5d9"} err="failed to get container status \"a49bc486fa34dcb585145594a4a566dac3601947518bb30e50f5d91d0903f5d9\": rpc error: code = NotFound desc = could not find container \"a49bc486fa34dcb585145594a4a566dac3601947518bb30e50f5d91d0903f5d9\": container with ID starting with a49bc486fa34dcb585145594a4a566dac3601947518bb30e50f5d91d0903f5d9 not found: ID does not exist" Oct 06 22:55:19 crc kubenswrapper[5014]: I1006 22:55:19.501238 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43e13ffe-b063-4e95-b84c-eb90e44162d9" path="/var/lib/kubelet/pods/43e13ffe-b063-4e95-b84c-eb90e44162d9/volumes" Oct 06 22:57:03 crc kubenswrapper[5014]: I1006 22:57:03.141316 5014 scope.go:117] "RemoveContainer" containerID="e7a7316e2c8877d1734a014323f6fea5c478a06c0b8281a9a9e8a4fb416a74fe" Oct 06 22:57:21 crc kubenswrapper[5014]: I1006 22:57:21.735091 5014 patch_prober.go:28] interesting 
pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:57:21 crc kubenswrapper[5014]: I1006 22:57:21.735975 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:57:50 crc kubenswrapper[5014]: I1006 22:57:50.847029 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-t5gct"] Oct 06 22:57:50 crc kubenswrapper[5014]: E1006 22:57:50.850529 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43e13ffe-b063-4e95-b84c-eb90e44162d9" containerName="extract-content" Oct 06 22:57:50 crc kubenswrapper[5014]: I1006 22:57:50.850810 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="43e13ffe-b063-4e95-b84c-eb90e44162d9" containerName="extract-content" Oct 06 22:57:50 crc kubenswrapper[5014]: E1006 22:57:50.850993 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43e13ffe-b063-4e95-b84c-eb90e44162d9" containerName="registry-server" Oct 06 22:57:50 crc kubenswrapper[5014]: I1006 22:57:50.851114 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="43e13ffe-b063-4e95-b84c-eb90e44162d9" containerName="registry-server" Oct 06 22:57:50 crc kubenswrapper[5014]: E1006 22:57:50.851304 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f4fc548-e5d4-4e73-866f-d256190227b1" containerName="registry-server" Oct 06 22:57:50 crc kubenswrapper[5014]: I1006 22:57:50.851451 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f4fc548-e5d4-4e73-866f-d256190227b1" containerName="registry-server" Oct 06 22:57:50 crc kubenswrapper[5014]: E1006 22:57:50.851610 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f4fc548-e5d4-4e73-866f-d256190227b1" containerName="extract-content" Oct 06 22:57:50 crc kubenswrapper[5014]: I1006 22:57:50.851760 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f4fc548-e5d4-4e73-866f-d256190227b1" containerName="extract-content" Oct 06 22:57:50 crc kubenswrapper[5014]: E1006 22:57:50.851936 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43e13ffe-b063-4e95-b84c-eb90e44162d9" containerName="extract-utilities" Oct 06 22:57:50 crc kubenswrapper[5014]: I1006 22:57:50.852058 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="43e13ffe-b063-4e95-b84c-eb90e44162d9" containerName="extract-utilities" Oct 06 22:57:50 crc kubenswrapper[5014]: E1006 22:57:50.852189 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f4fc548-e5d4-4e73-866f-d256190227b1" containerName="extract-utilities" Oct 06 22:57:50 crc kubenswrapper[5014]: I1006 22:57:50.852300 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f4fc548-e5d4-4e73-866f-d256190227b1" containerName="extract-utilities" Oct 06 22:57:50 crc kubenswrapper[5014]: I1006 22:57:50.852728 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="43e13ffe-b063-4e95-b84c-eb90e44162d9" containerName="registry-server" Oct 06 22:57:50 crc kubenswrapper[5014]: I1006 22:57:50.852890 5014 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="2f4fc548-e5d4-4e73-866f-d256190227b1" containerName="registry-server" Oct 06 22:57:50 crc kubenswrapper[5014]: I1006 22:57:50.855049 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:57:50 crc kubenswrapper[5014]: I1006 22:57:50.857433 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-t5gct"] Oct 06 22:57:50 crc kubenswrapper[5014]: I1006 22:57:50.948288 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcc3d012-7638-475a-b598-9b035c790123-catalog-content\") pod \"certified-operators-t5gct\" (UID: \"dcc3d012-7638-475a-b598-9b035c790123\") " pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:57:50 crc kubenswrapper[5014]: I1006 22:57:50.948346 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcc3d012-7638-475a-b598-9b035c790123-utilities\") pod \"certified-operators-t5gct\" (UID: \"dcc3d012-7638-475a-b598-9b035c790123\") " pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:57:50 crc kubenswrapper[5014]: I1006 22:57:50.948369 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8bb9\" (UniqueName: \"kubernetes.io/projected/dcc3d012-7638-475a-b598-9b035c790123-kube-api-access-l8bb9\") pod \"certified-operators-t5gct\" (UID: \"dcc3d012-7638-475a-b598-9b035c790123\") " pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:57:51 crc kubenswrapper[5014]: I1006 22:57:51.049531 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcc3d012-7638-475a-b598-9b035c790123-catalog-content\") pod \"certified-operators-t5gct\" (UID: \"dcc3d012-7638-475a-b598-9b035c790123\") " pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:57:51 crc kubenswrapper[5014]: I1006 22:57:51.049589 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcc3d012-7638-475a-b598-9b035c790123-utilities\") pod \"certified-operators-t5gct\" (UID: \"dcc3d012-7638-475a-b598-9b035c790123\") " pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:57:51 crc kubenswrapper[5014]: I1006 22:57:51.049610 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bb9\" (UniqueName: \"kubernetes.io/projected/dcc3d012-7638-475a-b598-9b035c790123-kube-api-access-l8bb9\") pod \"certified-operators-t5gct\" (UID: \"dcc3d012-7638-475a-b598-9b035c790123\") " pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:57:51 crc kubenswrapper[5014]: I1006 22:57:51.050136 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcc3d012-7638-475a-b598-9b035c790123-catalog-content\") pod \"certified-operators-t5gct\" (UID: \"dcc3d012-7638-475a-b598-9b035c790123\") " pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:57:51 crc kubenswrapper[5014]: I1006 22:57:51.050245 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcc3d012-7638-475a-b598-9b035c790123-utilities\") pod \"certified-operators-t5gct\" (UID: 
\"dcc3d012-7638-475a-b598-9b035c790123\") " pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:57:51 crc kubenswrapper[5014]: I1006 22:57:51.083109 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8bb9\" (UniqueName: \"kubernetes.io/projected/dcc3d012-7638-475a-b598-9b035c790123-kube-api-access-l8bb9\") pod \"certified-operators-t5gct\" (UID: \"dcc3d012-7638-475a-b598-9b035c790123\") " pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:57:51 crc kubenswrapper[5014]: I1006 22:57:51.229603 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:57:51 crc kubenswrapper[5014]: I1006 22:57:51.706869 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-t5gct"] Oct 06 22:57:51 crc kubenswrapper[5014]: I1006 22:57:51.735300 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:57:51 crc kubenswrapper[5014]: I1006 22:57:51.735367 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:57:51 crc kubenswrapper[5014]: I1006 22:57:51.743704 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t5gct" event={"ID":"dcc3d012-7638-475a-b598-9b035c790123","Type":"ContainerStarted","Data":"f0d3692fcfa0577f38391edae9cb4d56784ed0132de6ed3179ef5dae3390e364"} Oct 06 22:57:52 crc kubenswrapper[5014]: I1006 22:57:52.757646 5014 generic.go:334] "Generic (PLEG): container finished" podID="dcc3d012-7638-475a-b598-9b035c790123" containerID="a6421977517cb954a65ec43a248b22a08f487d19b82e3f12fa6987f4bfad84fd" exitCode=0 Oct 06 22:57:52 crc kubenswrapper[5014]: I1006 22:57:52.757733 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t5gct" event={"ID":"dcc3d012-7638-475a-b598-9b035c790123","Type":"ContainerDied","Data":"a6421977517cb954a65ec43a248b22a08f487d19b82e3f12fa6987f4bfad84fd"} Oct 06 22:57:52 crc kubenswrapper[5014]: I1006 22:57:52.761289 5014 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 06 22:57:54 crc kubenswrapper[5014]: I1006 22:57:54.781500 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t5gct" event={"ID":"dcc3d012-7638-475a-b598-9b035c790123","Type":"ContainerStarted","Data":"301c18bb79d938c0b5ff5ae7f9d02da55b733718fe221054489ee23248a3f17f"} Oct 06 22:57:55 crc kubenswrapper[5014]: I1006 22:57:55.793213 5014 generic.go:334] "Generic (PLEG): container finished" podID="dcc3d012-7638-475a-b598-9b035c790123" containerID="301c18bb79d938c0b5ff5ae7f9d02da55b733718fe221054489ee23248a3f17f" exitCode=0 Oct 06 22:57:55 crc kubenswrapper[5014]: I1006 22:57:55.793257 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t5gct" 
event={"ID":"dcc3d012-7638-475a-b598-9b035c790123","Type":"ContainerDied","Data":"301c18bb79d938c0b5ff5ae7f9d02da55b733718fe221054489ee23248a3f17f"} Oct 06 22:57:56 crc kubenswrapper[5014]: I1006 22:57:56.805734 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t5gct" event={"ID":"dcc3d012-7638-475a-b598-9b035c790123","Type":"ContainerStarted","Data":"a22b8b9b90afe713e3457e3340d57c4460faa740d412e9cdf65474db405a75d2"} Oct 06 22:57:56 crc kubenswrapper[5014]: I1006 22:57:56.830588 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-t5gct" podStartSLOduration=3.373661756 podStartE2EDuration="6.830558193s" podCreationTimestamp="2025-10-06 22:57:50 +0000 UTC" firstStartedPulling="2025-10-06 22:57:52.760812884 +0000 UTC m=+5218.053849658" lastFinishedPulling="2025-10-06 22:57:56.217709351 +0000 UTC m=+5221.510746095" observedRunningTime="2025-10-06 22:57:56.826596739 +0000 UTC m=+5222.119633483" watchObservedRunningTime="2025-10-06 22:57:56.830558193 +0000 UTC m=+5222.123594967" Oct 06 22:58:01 crc kubenswrapper[5014]: I1006 22:58:01.231036 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:58:01 crc kubenswrapper[5014]: I1006 22:58:01.233517 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:58:01 crc kubenswrapper[5014]: I1006 22:58:01.294918 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:58:01 crc kubenswrapper[5014]: I1006 22:58:01.891803 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:58:01 crc kubenswrapper[5014]: I1006 22:58:01.972001 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-t5gct"] Oct 06 22:58:03 crc kubenswrapper[5014]: I1006 22:58:03.870411 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-t5gct" podUID="dcc3d012-7638-475a-b598-9b035c790123" containerName="registry-server" containerID="cri-o://a22b8b9b90afe713e3457e3340d57c4460faa740d412e9cdf65474db405a75d2" gracePeriod=2 Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.374282 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.503920 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcc3d012-7638-475a-b598-9b035c790123-catalog-content\") pod \"dcc3d012-7638-475a-b598-9b035c790123\" (UID: \"dcc3d012-7638-475a-b598-9b035c790123\") " Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.504012 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcc3d012-7638-475a-b598-9b035c790123-utilities\") pod \"dcc3d012-7638-475a-b598-9b035c790123\" (UID: \"dcc3d012-7638-475a-b598-9b035c790123\") " Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.504099 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8bb9\" (UniqueName: \"kubernetes.io/projected/dcc3d012-7638-475a-b598-9b035c790123-kube-api-access-l8bb9\") pod \"dcc3d012-7638-475a-b598-9b035c790123\" (UID: \"dcc3d012-7638-475a-b598-9b035c790123\") " Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.504918 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dcc3d012-7638-475a-b598-9b035c790123-utilities" (OuterVolumeSpecName: "utilities") pod "dcc3d012-7638-475a-b598-9b035c790123" (UID: "dcc3d012-7638-475a-b598-9b035c790123"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.518924 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcc3d012-7638-475a-b598-9b035c790123-kube-api-access-l8bb9" (OuterVolumeSpecName: "kube-api-access-l8bb9") pod "dcc3d012-7638-475a-b598-9b035c790123" (UID: "dcc3d012-7638-475a-b598-9b035c790123"). InnerVolumeSpecName "kube-api-access-l8bb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.577077 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dcc3d012-7638-475a-b598-9b035c790123-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dcc3d012-7638-475a-b598-9b035c790123" (UID: "dcc3d012-7638-475a-b598-9b035c790123"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.607041 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcc3d012-7638-475a-b598-9b035c790123-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.607109 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcc3d012-7638-475a-b598-9b035c790123-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.607124 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8bb9\" (UniqueName: \"kubernetes.io/projected/dcc3d012-7638-475a-b598-9b035c790123-kube-api-access-l8bb9\") on node \"crc\" DevicePath \"\"" Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.883441 5014 generic.go:334] "Generic (PLEG): container finished" podID="dcc3d012-7638-475a-b598-9b035c790123" containerID="a22b8b9b90afe713e3457e3340d57c4460faa740d412e9cdf65474db405a75d2" exitCode=0 Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.883555 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t5gct" Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.884420 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t5gct" event={"ID":"dcc3d012-7638-475a-b598-9b035c790123","Type":"ContainerDied","Data":"a22b8b9b90afe713e3457e3340d57c4460faa740d412e9cdf65474db405a75d2"} Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.884612 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t5gct" event={"ID":"dcc3d012-7638-475a-b598-9b035c790123","Type":"ContainerDied","Data":"f0d3692fcfa0577f38391edae9cb4d56784ed0132de6ed3179ef5dae3390e364"} Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.884681 5014 scope.go:117] "RemoveContainer" containerID="a22b8b9b90afe713e3457e3340d57c4460faa740d412e9cdf65474db405a75d2" Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.917116 5014 scope.go:117] "RemoveContainer" containerID="301c18bb79d938c0b5ff5ae7f9d02da55b733718fe221054489ee23248a3f17f" Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.937757 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-t5gct"] Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.944297 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-t5gct"] Oct 06 22:58:04 crc kubenswrapper[5014]: I1006 22:58:04.966012 5014 scope.go:117] "RemoveContainer" containerID="a6421977517cb954a65ec43a248b22a08f487d19b82e3f12fa6987f4bfad84fd" Oct 06 22:58:05 crc kubenswrapper[5014]: I1006 22:58:05.007960 5014 scope.go:117] "RemoveContainer" containerID="a22b8b9b90afe713e3457e3340d57c4460faa740d412e9cdf65474db405a75d2" Oct 06 22:58:05 crc kubenswrapper[5014]: E1006 22:58:05.008460 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a22b8b9b90afe713e3457e3340d57c4460faa740d412e9cdf65474db405a75d2\": container with ID starting with a22b8b9b90afe713e3457e3340d57c4460faa740d412e9cdf65474db405a75d2 not found: ID does not exist" containerID="a22b8b9b90afe713e3457e3340d57c4460faa740d412e9cdf65474db405a75d2" Oct 06 22:58:05 crc kubenswrapper[5014]: I1006 22:58:05.008553 
5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a22b8b9b90afe713e3457e3340d57c4460faa740d412e9cdf65474db405a75d2"} err="failed to get container status \"a22b8b9b90afe713e3457e3340d57c4460faa740d412e9cdf65474db405a75d2\": rpc error: code = NotFound desc = could not find container \"a22b8b9b90afe713e3457e3340d57c4460faa740d412e9cdf65474db405a75d2\": container with ID starting with a22b8b9b90afe713e3457e3340d57c4460faa740d412e9cdf65474db405a75d2 not found: ID does not exist" Oct 06 22:58:05 crc kubenswrapper[5014]: I1006 22:58:05.008606 5014 scope.go:117] "RemoveContainer" containerID="301c18bb79d938c0b5ff5ae7f9d02da55b733718fe221054489ee23248a3f17f" Oct 06 22:58:05 crc kubenswrapper[5014]: E1006 22:58:05.009244 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"301c18bb79d938c0b5ff5ae7f9d02da55b733718fe221054489ee23248a3f17f\": container with ID starting with 301c18bb79d938c0b5ff5ae7f9d02da55b733718fe221054489ee23248a3f17f not found: ID does not exist" containerID="301c18bb79d938c0b5ff5ae7f9d02da55b733718fe221054489ee23248a3f17f" Oct 06 22:58:05 crc kubenswrapper[5014]: I1006 22:58:05.009316 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"301c18bb79d938c0b5ff5ae7f9d02da55b733718fe221054489ee23248a3f17f"} err="failed to get container status \"301c18bb79d938c0b5ff5ae7f9d02da55b733718fe221054489ee23248a3f17f\": rpc error: code = NotFound desc = could not find container \"301c18bb79d938c0b5ff5ae7f9d02da55b733718fe221054489ee23248a3f17f\": container with ID starting with 301c18bb79d938c0b5ff5ae7f9d02da55b733718fe221054489ee23248a3f17f not found: ID does not exist" Oct 06 22:58:05 crc kubenswrapper[5014]: I1006 22:58:05.009357 5014 scope.go:117] "RemoveContainer" containerID="a6421977517cb954a65ec43a248b22a08f487d19b82e3f12fa6987f4bfad84fd" Oct 06 22:58:05 crc kubenswrapper[5014]: E1006 22:58:05.009776 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6421977517cb954a65ec43a248b22a08f487d19b82e3f12fa6987f4bfad84fd\": container with ID starting with a6421977517cb954a65ec43a248b22a08f487d19b82e3f12fa6987f4bfad84fd not found: ID does not exist" containerID="a6421977517cb954a65ec43a248b22a08f487d19b82e3f12fa6987f4bfad84fd" Oct 06 22:58:05 crc kubenswrapper[5014]: I1006 22:58:05.009818 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6421977517cb954a65ec43a248b22a08f487d19b82e3f12fa6987f4bfad84fd"} err="failed to get container status \"a6421977517cb954a65ec43a248b22a08f487d19b82e3f12fa6987f4bfad84fd\": rpc error: code = NotFound desc = could not find container \"a6421977517cb954a65ec43a248b22a08f487d19b82e3f12fa6987f4bfad84fd\": container with ID starting with a6421977517cb954a65ec43a248b22a08f487d19b82e3f12fa6987f4bfad84fd not found: ID does not exist" Oct 06 22:58:05 crc kubenswrapper[5014]: I1006 22:58:05.502142 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dcc3d012-7638-475a-b598-9b035c790123" path="/var/lib/kubelet/pods/dcc3d012-7638-475a-b598-9b035c790123/volumes" Oct 06 22:58:21 crc kubenswrapper[5014]: I1006 22:58:21.735267 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 22:58:21 crc kubenswrapper[5014]: I1006 22:58:21.735975 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 22:58:21 crc kubenswrapper[5014]: I1006 22:58:21.736040 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 22:58:21 crc kubenswrapper[5014]: I1006 22:58:21.736921 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 06 22:58:21 crc kubenswrapper[5014]: I1006 22:58:21.737020 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" gracePeriod=600 Oct 06 22:58:21 crc kubenswrapper[5014]: E1006 22:58:21.861579 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:58:22 crc kubenswrapper[5014]: I1006 22:58:22.081075 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" exitCode=0 Oct 06 22:58:22 crc kubenswrapper[5014]: I1006 22:58:22.081123 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016"} Oct 06 22:58:22 crc kubenswrapper[5014]: I1006 22:58:22.081160 5014 scope.go:117] "RemoveContainer" containerID="f1b2dba782986714b78842b612ab901075cbfeaf1e1971b541c84d452a74bcc2" Oct 06 22:58:22 crc kubenswrapper[5014]: I1006 22:58:22.081970 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 22:58:22 crc kubenswrapper[5014]: E1006 22:58:22.082375 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.703789 5014 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/mariadb-copy-data"] Oct 06 22:58:26 crc kubenswrapper[5014]: E1006 22:58:26.704829 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcc3d012-7638-475a-b598-9b035c790123" containerName="extract-utilities" Oct 06 22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.704853 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcc3d012-7638-475a-b598-9b035c790123" containerName="extract-utilities" Oct 06 22:58:26 crc kubenswrapper[5014]: E1006 22:58:26.704885 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcc3d012-7638-475a-b598-9b035c790123" containerName="registry-server" Oct 06 22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.704923 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcc3d012-7638-475a-b598-9b035c790123" containerName="registry-server" Oct 06 22:58:26 crc kubenswrapper[5014]: E1006 22:58:26.704963 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcc3d012-7638-475a-b598-9b035c790123" containerName="extract-content" Oct 06 22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.704975 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcc3d012-7638-475a-b598-9b035c790123" containerName="extract-content" Oct 06 22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.705209 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcc3d012-7638-475a-b598-9b035c790123" containerName="registry-server" Oct 06 22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.706050 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Oct 06 22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.711719 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-5plzd" Oct 06 22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.714970 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Oct 06 22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.822547 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-48c6de94-391b-4026-a0e3-3fff276b9fa1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-48c6de94-391b-4026-a0e3-3fff276b9fa1\") pod \"mariadb-copy-data\" (UID: \"36650df3-b498-4a78-8d62-ade74cd09dc3\") " pod="openstack/mariadb-copy-data" Oct 06 22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.822601 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2d4wl\" (UniqueName: \"kubernetes.io/projected/36650df3-b498-4a78-8d62-ade74cd09dc3-kube-api-access-2d4wl\") pod \"mariadb-copy-data\" (UID: \"36650df3-b498-4a78-8d62-ade74cd09dc3\") " pod="openstack/mariadb-copy-data" Oct 06 22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.924136 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-48c6de94-391b-4026-a0e3-3fff276b9fa1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-48c6de94-391b-4026-a0e3-3fff276b9fa1\") pod \"mariadb-copy-data\" (UID: \"36650df3-b498-4a78-8d62-ade74cd09dc3\") " pod="openstack/mariadb-copy-data" Oct 06 22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.924220 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2d4wl\" (UniqueName: \"kubernetes.io/projected/36650df3-b498-4a78-8d62-ade74cd09dc3-kube-api-access-2d4wl\") pod \"mariadb-copy-data\" (UID: \"36650df3-b498-4a78-8d62-ade74cd09dc3\") " pod="openstack/mariadb-copy-data" Oct 06 
22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.927085 5014 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Oct 06 22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.927292 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-48c6de94-391b-4026-a0e3-3fff276b9fa1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-48c6de94-391b-4026-a0e3-3fff276b9fa1\") pod \"mariadb-copy-data\" (UID: \"36650df3-b498-4a78-8d62-ade74cd09dc3\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ae0691166e7d8ad3613658a006b10eb297794a06cfd174dd0538baf2f6407ec8/globalmount\"" pod="openstack/mariadb-copy-data" Oct 06 22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.956713 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2d4wl\" (UniqueName: \"kubernetes.io/projected/36650df3-b498-4a78-8d62-ade74cd09dc3-kube-api-access-2d4wl\") pod \"mariadb-copy-data\" (UID: \"36650df3-b498-4a78-8d62-ade74cd09dc3\") " pod="openstack/mariadb-copy-data" Oct 06 22:58:26 crc kubenswrapper[5014]: I1006 22:58:26.980866 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-48c6de94-391b-4026-a0e3-3fff276b9fa1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-48c6de94-391b-4026-a0e3-3fff276b9fa1\") pod \"mariadb-copy-data\" (UID: \"36650df3-b498-4a78-8d62-ade74cd09dc3\") " pod="openstack/mariadb-copy-data" Oct 06 22:58:27 crc kubenswrapper[5014]: I1006 22:58:27.034445 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Oct 06 22:58:27 crc kubenswrapper[5014]: I1006 22:58:27.631439 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Oct 06 22:58:27 crc kubenswrapper[5014]: W1006 22:58:27.641459 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod36650df3_b498_4a78_8d62_ade74cd09dc3.slice/crio-728fbdf755ef2799cf414e23bd08529a81f2f3852a92467b9e557649eeccec32 WatchSource:0}: Error finding container 728fbdf755ef2799cf414e23bd08529a81f2f3852a92467b9e557649eeccec32: Status 404 returned error can't find the container with id 728fbdf755ef2799cf414e23bd08529a81f2f3852a92467b9e557649eeccec32 Oct 06 22:58:28 crc kubenswrapper[5014]: I1006 22:58:28.143885 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"36650df3-b498-4a78-8d62-ade74cd09dc3","Type":"ContainerStarted","Data":"b3d02d6df47c4f9e95ebab2d8de2392e9ce7878509bbaf7ac4ae51075c56ddd8"} Oct 06 22:58:28 crc kubenswrapper[5014]: I1006 22:58:28.143975 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"36650df3-b498-4a78-8d62-ade74cd09dc3","Type":"ContainerStarted","Data":"728fbdf755ef2799cf414e23bd08529a81f2f3852a92467b9e557649eeccec32"} Oct 06 22:58:28 crc kubenswrapper[5014]: I1006 22:58:28.173046 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-copy-data" podStartSLOduration=3.173018811 podStartE2EDuration="3.173018811s" podCreationTimestamp="2025-10-06 22:58:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:58:28.162780852 +0000 UTC m=+5253.455817626" watchObservedRunningTime="2025-10-06 22:58:28.173018811 +0000 
UTC m=+5253.466055586" Oct 06 22:58:30 crc kubenswrapper[5014]: I1006 22:58:30.529912 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Oct 06 22:58:30 crc kubenswrapper[5014]: I1006 22:58:30.532188 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Oct 06 22:58:30 crc kubenswrapper[5014]: I1006 22:58:30.539455 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Oct 06 22:58:30 crc kubenswrapper[5014]: I1006 22:58:30.690584 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72rd5\" (UniqueName: \"kubernetes.io/projected/49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e-kube-api-access-72rd5\") pod \"mariadb-client\" (UID: \"49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e\") " pod="openstack/mariadb-client" Oct 06 22:58:30 crc kubenswrapper[5014]: I1006 22:58:30.792509 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72rd5\" (UniqueName: \"kubernetes.io/projected/49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e-kube-api-access-72rd5\") pod \"mariadb-client\" (UID: \"49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e\") " pod="openstack/mariadb-client" Oct 06 22:58:30 crc kubenswrapper[5014]: I1006 22:58:30.826166 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72rd5\" (UniqueName: \"kubernetes.io/projected/49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e-kube-api-access-72rd5\") pod \"mariadb-client\" (UID: \"49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e\") " pod="openstack/mariadb-client" Oct 06 22:58:30 crc kubenswrapper[5014]: I1006 22:58:30.872933 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Oct 06 22:58:31 crc kubenswrapper[5014]: I1006 22:58:31.405527 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Oct 06 22:58:32 crc kubenswrapper[5014]: I1006 22:58:32.186760 5014 generic.go:334] "Generic (PLEG): container finished" podID="49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e" containerID="b14068296ce0d8948d483bf3157530f1642f40b4eb8657f880194f351d7cb1f8" exitCode=0 Oct 06 22:58:32 crc kubenswrapper[5014]: I1006 22:58:32.186848 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e","Type":"ContainerDied","Data":"b14068296ce0d8948d483bf3157530f1642f40b4eb8657f880194f351d7cb1f8"} Oct 06 22:58:32 crc kubenswrapper[5014]: I1006 22:58:32.187172 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e","Type":"ContainerStarted","Data":"8d472942b3a45d548b5d8440164511eb3c0da08f70c9de71579edeb917e74ec5"} Oct 06 22:58:33 crc kubenswrapper[5014]: I1006 22:58:33.641269 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Oct 06 22:58:33 crc kubenswrapper[5014]: I1006 22:58:33.668884 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e/mariadb-client/0.log" Oct 06 22:58:33 crc kubenswrapper[5014]: I1006 22:58:33.703276 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Oct 06 22:58:33 crc kubenswrapper[5014]: I1006 22:58:33.713191 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Oct 06 22:58:33 crc kubenswrapper[5014]: I1006 22:58:33.751795 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-72rd5\" (UniqueName: \"kubernetes.io/projected/49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e-kube-api-access-72rd5\") pod \"49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e\" (UID: \"49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e\") " Oct 06 22:58:33 crc kubenswrapper[5014]: I1006 22:58:33.758569 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e-kube-api-access-72rd5" (OuterVolumeSpecName: "kube-api-access-72rd5") pod "49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e" (UID: "49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e"). InnerVolumeSpecName "kube-api-access-72rd5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:58:33 crc kubenswrapper[5014]: I1006 22:58:33.854774 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-72rd5\" (UniqueName: \"kubernetes.io/projected/49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e-kube-api-access-72rd5\") on node \"crc\" DevicePath \"\"" Oct 06 22:58:33 crc kubenswrapper[5014]: I1006 22:58:33.888116 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Oct 06 22:58:33 crc kubenswrapper[5014]: E1006 22:58:33.888529 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e" containerName="mariadb-client" Oct 06 22:58:33 crc kubenswrapper[5014]: I1006 22:58:33.888556 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e" containerName="mariadb-client" Oct 06 22:58:33 crc kubenswrapper[5014]: I1006 22:58:33.888777 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e" containerName="mariadb-client" Oct 06 22:58:33 crc kubenswrapper[5014]: I1006 22:58:33.889437 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Oct 06 22:58:33 crc kubenswrapper[5014]: I1006 22:58:33.894581 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Oct 06 22:58:33 crc kubenswrapper[5014]: I1006 22:58:33.957295 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxh8x\" (UniqueName: \"kubernetes.io/projected/3783a329-d0c7-45c1-8461-4d6887ced7fa-kube-api-access-lxh8x\") pod \"mariadb-client\" (UID: \"3783a329-d0c7-45c1-8461-4d6887ced7fa\") " pod="openstack/mariadb-client" Oct 06 22:58:34 crc kubenswrapper[5014]: I1006 22:58:34.058897 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxh8x\" (UniqueName: \"kubernetes.io/projected/3783a329-d0c7-45c1-8461-4d6887ced7fa-kube-api-access-lxh8x\") pod \"mariadb-client\" (UID: \"3783a329-d0c7-45c1-8461-4d6887ced7fa\") " pod="openstack/mariadb-client" Oct 06 22:58:34 crc kubenswrapper[5014]: I1006 22:58:34.088212 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxh8x\" (UniqueName: \"kubernetes.io/projected/3783a329-d0c7-45c1-8461-4d6887ced7fa-kube-api-access-lxh8x\") pod \"mariadb-client\" (UID: \"3783a329-d0c7-45c1-8461-4d6887ced7fa\") " pod="openstack/mariadb-client" Oct 06 22:58:34 crc kubenswrapper[5014]: I1006 22:58:34.213706 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Oct 06 22:58:34 crc kubenswrapper[5014]: I1006 22:58:34.214491 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d472942b3a45d548b5d8440164511eb3c0da08f70c9de71579edeb917e74ec5" Oct 06 22:58:34 crc kubenswrapper[5014]: I1006 22:58:34.214679 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Oct 06 22:58:34 crc kubenswrapper[5014]: I1006 22:58:34.315124 5014 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/mariadb-client" oldPodUID="49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e" podUID="3783a329-d0c7-45c1-8461-4d6887ced7fa" Oct 06 22:58:34 crc kubenswrapper[5014]: I1006 22:58:34.484335 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 22:58:34 crc kubenswrapper[5014]: E1006 22:58:34.485090 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:58:34 crc kubenswrapper[5014]: I1006 22:58:34.798571 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Oct 06 22:58:34 crc kubenswrapper[5014]: W1006 22:58:34.806718 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3783a329_d0c7_45c1_8461_4d6887ced7fa.slice/crio-7e4fb1efd9bf09aabec15b086003c3086f3f37620addf9002105c07e8fe59bde WatchSource:0}: Error finding container 7e4fb1efd9bf09aabec15b086003c3086f3f37620addf9002105c07e8fe59bde: Status 404 returned error can't find the container with id 7e4fb1efd9bf09aabec15b086003c3086f3f37620addf9002105c07e8fe59bde Oct 06 22:58:35 crc kubenswrapper[5014]: I1006 22:58:35.229404 5014 generic.go:334] "Generic (PLEG): container finished" podID="3783a329-d0c7-45c1-8461-4d6887ced7fa" containerID="2fc390bd6833a786bc3cfeaee692a0e4f2b6a470c195732019b0ae98427ae5c6" exitCode=0 Oct 06 22:58:35 crc kubenswrapper[5014]: I1006 22:58:35.229477 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"3783a329-d0c7-45c1-8461-4d6887ced7fa","Type":"ContainerDied","Data":"2fc390bd6833a786bc3cfeaee692a0e4f2b6a470c195732019b0ae98427ae5c6"} Oct 06 22:58:35 crc kubenswrapper[5014]: I1006 22:58:35.229550 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"3783a329-d0c7-45c1-8461-4d6887ced7fa","Type":"ContainerStarted","Data":"7e4fb1efd9bf09aabec15b086003c3086f3f37620addf9002105c07e8fe59bde"} Oct 06 22:58:35 crc kubenswrapper[5014]: I1006 22:58:35.501508 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e" path="/var/lib/kubelet/pods/49d1a9e5-631b-4ab8-9a32-f6e6c46bf00e/volumes" Oct 06 22:58:36 crc kubenswrapper[5014]: I1006 22:58:36.644833 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Oct 06 22:58:36 crc kubenswrapper[5014]: I1006 22:58:36.669831 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_3783a329-d0c7-45c1-8461-4d6887ced7fa/mariadb-client/0.log" Oct 06 22:58:36 crc kubenswrapper[5014]: I1006 22:58:36.731440 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Oct 06 22:58:36 crc kubenswrapper[5014]: I1006 22:58:36.740700 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Oct 06 22:58:36 crc kubenswrapper[5014]: I1006 22:58:36.838575 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxh8x\" (UniqueName: \"kubernetes.io/projected/3783a329-d0c7-45c1-8461-4d6887ced7fa-kube-api-access-lxh8x\") pod \"3783a329-d0c7-45c1-8461-4d6887ced7fa\" (UID: \"3783a329-d0c7-45c1-8461-4d6887ced7fa\") " Oct 06 22:58:36 crc kubenswrapper[5014]: I1006 22:58:36.847188 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3783a329-d0c7-45c1-8461-4d6887ced7fa-kube-api-access-lxh8x" (OuterVolumeSpecName: "kube-api-access-lxh8x") pod "3783a329-d0c7-45c1-8461-4d6887ced7fa" (UID: "3783a329-d0c7-45c1-8461-4d6887ced7fa"). InnerVolumeSpecName "kube-api-access-lxh8x". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:58:36 crc kubenswrapper[5014]: I1006 22:58:36.941045 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxh8x\" (UniqueName: \"kubernetes.io/projected/3783a329-d0c7-45c1-8461-4d6887ced7fa-kube-api-access-lxh8x\") on node \"crc\" DevicePath \"\"" Oct 06 22:58:37 crc kubenswrapper[5014]: I1006 22:58:37.254716 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e4fb1efd9bf09aabec15b086003c3086f3f37620addf9002105c07e8fe59bde" Oct 06 22:58:37 crc kubenswrapper[5014]: I1006 22:58:37.255087 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Oct 06 22:58:37 crc kubenswrapper[5014]: I1006 22:58:37.503706 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3783a329-d0c7-45c1-8461-4d6887ced7fa" path="/var/lib/kubelet/pods/3783a329-d0c7-45c1-8461-4d6887ced7fa/volumes" Oct 06 22:58:48 crc kubenswrapper[5014]: I1006 22:58:48.484680 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 22:58:48 crc kubenswrapper[5014]: E1006 22:58:48.485889 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:58:59 crc kubenswrapper[5014]: I1006 22:58:59.483930 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 22:58:59 crc kubenswrapper[5014]: E1006 22:58:59.484541 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.238247 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 06 22:59:10 crc kubenswrapper[5014]: E1006 22:59:10.238961 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3783a329-d0c7-45c1-8461-4d6887ced7fa" containerName="mariadb-client" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.238974 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="3783a329-d0c7-45c1-8461-4d6887ced7fa" containerName="mariadb-client" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.239126 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="3783a329-d0c7-45c1-8461-4d6887ced7fa" containerName="mariadb-client" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.239861 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.243843 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.244216 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.245123 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-xtjjl" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.245654 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.253304 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.258127 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-1"] Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.260282 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.287035 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-2"] Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.290447 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.291364 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.338115 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/02489223-b511-40b5-8eaa-0aeee96a0e68-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.338190 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/02489223-b511-40b5-8eaa-0aeee96a0e68-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.338237 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/02489223-b511-40b5-8eaa-0aeee96a0e68-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.338268 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4ef35593-d6b1-4a9a-8aab-34af622cb859\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4ef35593-d6b1-4a9a-8aab-34af622cb859\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.338301 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.338358 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hx2h\" (UniqueName: \"kubernetes.io/projected/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-kube-api-access-7hx2h\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.338393 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.338441 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/02489223-b511-40b5-8eaa-0aeee96a0e68-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.338576 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-af238c33-e0cb-4ed8-b0b0-3b97d47b5ed7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-af238c33-e0cb-4ed8-b0b0-3b97d47b5ed7\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.338648 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87l9m\" (UniqueName: \"kubernetes.io/projected/02489223-b511-40b5-8eaa-0aeee96a0e68-kube-api-access-87l9m\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.338686 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.338794 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.339042 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.339093 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-config\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.339128 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02489223-b511-40b5-8eaa-0aeee96a0e68-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.339210 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02489223-b511-40b5-8eaa-0aeee96a0e68-config\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.339603 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.357124 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440008 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440065 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f59n\" (UniqueName: \"kubernetes.io/projected/5728d872-705b-4439-965d-36634f78b9c3-kube-api-access-4f59n\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440108 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5728d872-705b-4439-965d-36634f78b9c3-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440134 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hx2h\" (UniqueName: \"kubernetes.io/projected/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-kube-api-access-7hx2h\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440162 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5728d872-705b-4439-965d-36634f78b9c3-config\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440185 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc 
kubenswrapper[5014]: I1006 22:59:10.440221 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/02489223-b511-40b5-8eaa-0aeee96a0e68-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440245 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-af238c33-e0cb-4ed8-b0b0-3b97d47b5ed7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-af238c33-e0cb-4ed8-b0b0-3b97d47b5ed7\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440277 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5728d872-705b-4439-965d-36634f78b9c3-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440306 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87l9m\" (UniqueName: \"kubernetes.io/projected/02489223-b511-40b5-8eaa-0aeee96a0e68-kube-api-access-87l9m\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440331 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440362 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5728d872-705b-4439-965d-36634f78b9c3-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440394 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440429 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440452 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-config\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440477 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/02489223-b511-40b5-8eaa-0aeee96a0e68-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440506 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5728d872-705b-4439-965d-36634f78b9c3-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440532 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02489223-b511-40b5-8eaa-0aeee96a0e68-config\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440555 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/02489223-b511-40b5-8eaa-0aeee96a0e68-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440584 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/02489223-b511-40b5-8eaa-0aeee96a0e68-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440607 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5728d872-705b-4439-965d-36634f78b9c3-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440652 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-cc744a69-2df3-4da4-bf35-114f8bb15bd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc744a69-2df3-4da4-bf35-114f8bb15bd5\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440681 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/02489223-b511-40b5-8eaa-0aeee96a0e68-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.440706 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4ef35593-d6b1-4a9a-8aab-34af622cb859\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4ef35593-d6b1-4a9a-8aab-34af622cb859\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.442432 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/02489223-b511-40b5-8eaa-0aeee96a0e68-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: 
\"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.442936 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/02489223-b511-40b5-8eaa-0aeee96a0e68-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.443033 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.443190 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.443540 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02489223-b511-40b5-8eaa-0aeee96a0e68-config\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.443599 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-config\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.444298 5014 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.444416 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-af238c33-e0cb-4ed8-b0b0-3b97d47b5ed7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-af238c33-e0cb-4ed8-b0b0-3b97d47b5ed7\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b31b59046f99a7c57983ddbe5fd35de606f361f272b20c14187927ad78ac9a3e/globalmount\"" pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.445586 5014 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.445612 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4ef35593-d6b1-4a9a-8aab-34af622cb859\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4ef35593-d6b1-4a9a-8aab-34af622cb859\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/20f370c4efe5be85710d2ca3d14d5c1cf040cdc1f957a675d44c1e7872e7bad9/globalmount\"" pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.447312 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/02489223-b511-40b5-8eaa-0aeee96a0e68-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.447570 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/02489223-b511-40b5-8eaa-0aeee96a0e68-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.448383 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.450353 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.459476 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hx2h\" (UniqueName: \"kubernetes.io/projected/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-kube-api-access-7hx2h\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.459759 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3399bbdd-5294-48b8-a026-a0ebbd3eddd1-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.461177 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02489223-b511-40b5-8eaa-0aeee96a0e68-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.462117 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87l9m\" (UniqueName: \"kubernetes.io/projected/02489223-b511-40b5-8eaa-0aeee96a0e68-kube-api-access-87l9m\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 
crc kubenswrapper[5014]: I1006 22:59:10.491505 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-af238c33-e0cb-4ed8-b0b0-3b97d47b5ed7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-af238c33-e0cb-4ed8-b0b0-3b97d47b5ed7\") pod \"ovsdbserver-nb-1\" (UID: \"02489223-b511-40b5-8eaa-0aeee96a0e68\") " pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.493448 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4ef35593-d6b1-4a9a-8aab-34af622cb859\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4ef35593-d6b1-4a9a-8aab-34af622cb859\") pod \"ovsdbserver-nb-0\" (UID: \"3399bbdd-5294-48b8-a026-a0ebbd3eddd1\") " pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.542485 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5728d872-705b-4439-965d-36634f78b9c3-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.542531 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-cc744a69-2df3-4da4-bf35-114f8bb15bd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc744a69-2df3-4da4-bf35-114f8bb15bd5\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.542596 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4f59n\" (UniqueName: \"kubernetes.io/projected/5728d872-705b-4439-965d-36634f78b9c3-kube-api-access-4f59n\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.542675 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5728d872-705b-4439-965d-36634f78b9c3-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.542705 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5728d872-705b-4439-965d-36634f78b9c3-config\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.542778 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5728d872-705b-4439-965d-36634f78b9c3-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.542831 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5728d872-705b-4439-965d-36634f78b9c3-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.542885 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/5728d872-705b-4439-965d-36634f78b9c3-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.543086 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5728d872-705b-4439-965d-36634f78b9c3-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.544400 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5728d872-705b-4439-965d-36634f78b9c3-config\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.544444 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5728d872-705b-4439-965d-36634f78b9c3-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.546176 5014 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.546211 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-cc744a69-2df3-4da4-bf35-114f8bb15bd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc744a69-2df3-4da4-bf35-114f8bb15bd5\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/67813f5753ec620b40c2b0e4c8512512f4072db1372f81189600e00523313918/globalmount\"" pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.546924 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5728d872-705b-4439-965d-36634f78b9c3-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.547227 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5728d872-705b-4439-965d-36634f78b9c3-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.548652 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5728d872-705b-4439-965d-36634f78b9c3-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.562318 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f59n\" (UniqueName: \"kubernetes.io/projected/5728d872-705b-4439-965d-36634f78b9c3-kube-api-access-4f59n\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 
22:59:10.566340 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.584510 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-cc744a69-2df3-4da4-bf35-114f8bb15bd5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cc744a69-2df3-4da4-bf35-114f8bb15bd5\") pod \"ovsdbserver-nb-2\" (UID: \"5728d872-705b-4439-965d-36634f78b9c3\") " pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.585128 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:10 crc kubenswrapper[5014]: I1006 22:59:10.650056 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:11 crc kubenswrapper[5014]: I1006 22:59:11.141596 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 06 22:59:11 crc kubenswrapper[5014]: I1006 22:59:11.250190 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Oct 06 22:59:11 crc kubenswrapper[5014]: W1006 22:59:11.255040 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5728d872_705b_4439_965d_36634f78b9c3.slice/crio-0a19ae7ba69cd4d18a44687468d73683faac3245e91f2b90fe3b2de995e96404 WatchSource:0}: Error finding container 0a19ae7ba69cd4d18a44687468d73683faac3245e91f2b90fe3b2de995e96404: Status 404 returned error can't find the container with id 0a19ae7ba69cd4d18a44687468d73683faac3245e91f2b90fe3b2de995e96404 Oct 06 22:59:11 crc kubenswrapper[5014]: I1006 22:59:11.610949 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"3399bbdd-5294-48b8-a026-a0ebbd3eddd1","Type":"ContainerStarted","Data":"14271c031448232059cd2f38736f66889eaa055be9b9ccb40f1e2c69f7c69f46"} Oct 06 22:59:11 crc kubenswrapper[5014]: I1006 22:59:11.610997 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"3399bbdd-5294-48b8-a026-a0ebbd3eddd1","Type":"ContainerStarted","Data":"28185b9b2f52d326962d7cdd19ba66a3470ced04a1cba9ad3289ee85d8ade38f"} Oct 06 22:59:11 crc kubenswrapper[5014]: I1006 22:59:11.611010 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"3399bbdd-5294-48b8-a026-a0ebbd3eddd1","Type":"ContainerStarted","Data":"15b26a69674cc67780c4e29f076e77213e4585023d0e287450f1ecbc1efd90dd"} Oct 06 22:59:11 crc kubenswrapper[5014]: I1006 22:59:11.613491 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"5728d872-705b-4439-965d-36634f78b9c3","Type":"ContainerStarted","Data":"eca1127a2e23d18b4f16fd407c19821f2cafe756189db07ee3f39943fd4f9120"} Oct 06 22:59:11 crc kubenswrapper[5014]: I1006 22:59:11.613520 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"5728d872-705b-4439-965d-36634f78b9c3","Type":"ContainerStarted","Data":"0a19ae7ba69cd4d18a44687468d73683faac3245e91f2b90fe3b2de995e96404"} Oct 06 22:59:11 crc kubenswrapper[5014]: I1006 22:59:11.634817 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=2.634802608 podStartE2EDuration="2.634802608s" podCreationTimestamp="2025-10-06 22:59:09 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:59:11.633826547 +0000 UTC m=+5296.926863281" watchObservedRunningTime="2025-10-06 22:59:11.634802608 +0000 UTC m=+5296.927839342" Oct 06 22:59:11 crc kubenswrapper[5014]: I1006 22:59:11.918929 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.114080 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.115340 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.118801 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-nz6n2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.118881 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.121402 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.122410 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.150277 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.168988 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-1"] Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.180829 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.182338 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-2"] Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.186189 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.212500 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.221525 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.277537 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.277599 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.277668 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/44f14dca-2c88-421b-8f41-302926b341bb-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.277700 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b6b4bae3-e0dc-49d0-897d-a769bdc8e34c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b6b4bae3-e0dc-49d0-897d-a769bdc8e34c\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.277725 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f14dca-2c88-421b-8f41-302926b341bb-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.277749 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/44f14dca-2c88-421b-8f41-302926b341bb-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.277771 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9916618a-700d-406e-b443-38423eafb158-config\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.277796 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 
22:59:12.277827 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9916618a-700d-406e-b443-38423eafb158-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.277863 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.277885 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/44f14dca-2c88-421b-8f41-302926b341bb-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.277910 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-3786d482-b6e5-48cb-b587-cc9acae81a33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3786d482-b6e5-48cb-b587-cc9acae81a33\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.277955 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7pnt\" (UniqueName: \"kubernetes.io/projected/9916618a-700d-406e-b443-38423eafb158-kube-api-access-l7pnt\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.277979 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-config\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.278005 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.278027 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7e862dd4-713b-4c90-b774-40d82cee6cb1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e862dd4-713b-4c90-b774-40d82cee6cb1\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.278064 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/44f14dca-2c88-421b-8f41-302926b341bb-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.278087 
5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9nx5\" (UniqueName: \"kubernetes.io/projected/44f14dca-2c88-421b-8f41-302926b341bb-kube-api-access-c9nx5\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.278115 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9916618a-700d-406e-b443-38423eafb158-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.278137 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gf7s9\" (UniqueName: \"kubernetes.io/projected/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-kube-api-access-gf7s9\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.278157 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9916618a-700d-406e-b443-38423eafb158-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.278186 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9916618a-700d-406e-b443-38423eafb158-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.278209 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9916618a-700d-406e-b443-38423eafb158-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.278230 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44f14dca-2c88-421b-8f41-302926b341bb-config\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380138 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9916618a-700d-406e-b443-38423eafb158-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380204 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380231 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"scripts\" (UniqueName: \"kubernetes.io/configmap/44f14dca-2c88-421b-8f41-302926b341bb-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380265 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-3786d482-b6e5-48cb-b587-cc9acae81a33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3786d482-b6e5-48cb-b587-cc9acae81a33\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380309 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7pnt\" (UniqueName: \"kubernetes.io/projected/9916618a-700d-406e-b443-38423eafb158-kube-api-access-l7pnt\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380332 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-config\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380354 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380378 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7e862dd4-713b-4c90-b774-40d82cee6cb1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e862dd4-713b-4c90-b774-40d82cee6cb1\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380413 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/44f14dca-2c88-421b-8f41-302926b341bb-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380437 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9nx5\" (UniqueName: \"kubernetes.io/projected/44f14dca-2c88-421b-8f41-302926b341bb-kube-api-access-c9nx5\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380462 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9916618a-700d-406e-b443-38423eafb158-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380483 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gf7s9\" (UniqueName: \"kubernetes.io/projected/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-kube-api-access-gf7s9\") pod \"ovsdbserver-sb-1\" (UID: 
\"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380502 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9916618a-700d-406e-b443-38423eafb158-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380530 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9916618a-700d-406e-b443-38423eafb158-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380550 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44f14dca-2c88-421b-8f41-302926b341bb-config\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380572 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9916618a-700d-406e-b443-38423eafb158-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380603 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380651 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380697 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/44f14dca-2c88-421b-8f41-302926b341bb-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380728 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b6b4bae3-e0dc-49d0-897d-a769bdc8e34c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b6b4bae3-e0dc-49d0-897d-a769bdc8e34c\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380749 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f14dca-2c88-421b-8f41-302926b341bb-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380769 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/44f14dca-2c88-421b-8f41-302926b341bb-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380792 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9916618a-700d-406e-b443-38423eafb158-config\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.380813 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.381806 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.382916 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9916618a-700d-406e-b443-38423eafb158-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.383148 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-config\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.383354 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/44f14dca-2c88-421b-8f41-302926b341bb-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.384097 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/44f14dca-2c88-421b-8f41-302926b341bb-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.385927 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.385957 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.386364 5014 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice 
STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.386394 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b6b4bae3-e0dc-49d0-897d-a769bdc8e34c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b6b4bae3-e0dc-49d0-897d-a769bdc8e34c\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1574c42fa385c3958b5a378b1d80d41835a5af57641ba3c52293490620d892d6/globalmount\"" pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.386485 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44f14dca-2c88-421b-8f41-302926b341bb-config\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.386725 5014 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.386964 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7e862dd4-713b-4c90-b774-40d82cee6cb1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e862dd4-713b-4c90-b774-40d82cee6cb1\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/a3646d1bc2fa39086dda29b84552f07fd343f1add7432d109ec9bdbb9644188a/globalmount\"" pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.387038 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9916618a-700d-406e-b443-38423eafb158-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.387785 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.388229 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44f14dca-2c88-421b-8f41-302926b341bb-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.388580 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9916618a-700d-406e-b443-38423eafb158-config\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.388867 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9916618a-700d-406e-b443-38423eafb158-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 
22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.389084 5014 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.389378 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-3786d482-b6e5-48cb-b587-cc9acae81a33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3786d482-b6e5-48cb-b587-cc9acae81a33\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/04f0d81b9ffcd577b6c0657f92c2565835cedd2cc3f8dce08814f849e136b4ae/globalmount\"" pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.391645 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9916618a-700d-406e-b443-38423eafb158-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.393376 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/44f14dca-2c88-421b-8f41-302926b341bb-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.396270 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9916618a-700d-406e-b443-38423eafb158-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.396421 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.399546 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/44f14dca-2c88-421b-8f41-302926b341bb-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.401550 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gf7s9\" (UniqueName: \"kubernetes.io/projected/3d7105fa-62d5-469e-b4f3-774c6b1a5a90-kube-api-access-gf7s9\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.402026 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9nx5\" (UniqueName: \"kubernetes.io/projected/44f14dca-2c88-421b-8f41-302926b341bb-kube-api-access-c9nx5\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.403549 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7pnt\" (UniqueName: 
\"kubernetes.io/projected/9916618a-700d-406e-b443-38423eafb158-kube-api-access-l7pnt\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.422786 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b6b4bae3-e0dc-49d0-897d-a769bdc8e34c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b6b4bae3-e0dc-49d0-897d-a769bdc8e34c\") pod \"ovsdbserver-sb-0\" (UID: \"9916618a-700d-406e-b443-38423eafb158\") " pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.424110 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-3786d482-b6e5-48cb-b587-cc9acae81a33\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-3786d482-b6e5-48cb-b587-cc9acae81a33\") pod \"ovsdbserver-sb-1\" (UID: \"3d7105fa-62d5-469e-b4f3-774c6b1a5a90\") " pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.428214 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7e862dd4-713b-4c90-b774-40d82cee6cb1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7e862dd4-713b-4c90-b774-40d82cee6cb1\") pod \"ovsdbserver-sb-2\" (UID: \"44f14dca-2c88-421b-8f41-302926b341bb\") " pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.476442 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.496811 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.515154 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.642391 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"5728d872-705b-4439-965d-36634f78b9c3","Type":"ContainerStarted","Data":"b3f0be06b4c72a6725a31d540802fc75c27519f43b07e077757cd82c55234d30"} Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.653875 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"02489223-b511-40b5-8eaa-0aeee96a0e68","Type":"ContainerStarted","Data":"bb7638bfb20d8b3a884ef462bf574a50f7d2b8ccb102d5c748348b2618b1af64"} Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.653911 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"02489223-b511-40b5-8eaa-0aeee96a0e68","Type":"ContainerStarted","Data":"a14a6aa946fd776ddacb50c8dbcb671440365954be4afb283e3eac0e23b9ac3a"} Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.653924 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"02489223-b511-40b5-8eaa-0aeee96a0e68","Type":"ContainerStarted","Data":"d0678a5fbd93ad4389110f5416326e1355389ae06c8c3a80559d08645fcf5819"} Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.662754 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-2" podStartSLOduration=3.662735343 podStartE2EDuration="3.662735343s" podCreationTimestamp="2025-10-06 22:59:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:59:12.660994209 +0000 UTC m=+5297.954030943" watchObservedRunningTime="2025-10-06 22:59:12.662735343 +0000 UTC m=+5297.955772067" Oct 06 22:59:12 crc kubenswrapper[5014]: I1006 22:59:12.682828 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-1" podStartSLOduration=3.68281103 podStartE2EDuration="3.68281103s" podCreationTimestamp="2025-10-06 22:59:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:59:12.682109938 +0000 UTC m=+5297.975146682" watchObservedRunningTime="2025-10-06 22:59:12.68281103 +0000 UTC m=+5297.975847764" Oct 06 22:59:13 crc kubenswrapper[5014]: I1006 22:59:13.052844 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 06 22:59:13 crc kubenswrapper[5014]: I1006 22:59:13.126367 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Oct 06 22:59:13 crc kubenswrapper[5014]: W1006 22:59:13.135230 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d7105fa_62d5_469e_b4f3_774c6b1a5a90.slice/crio-21ac0bdce91c3f69e4c28b9d3531b4c92c623ee3b2bb615e5ad088655a626d45 WatchSource:0}: Error finding container 21ac0bdce91c3f69e4c28b9d3531b4c92c623ee3b2bb615e5ad088655a626d45: Status 404 returned error can't find the container with id 21ac0bdce91c3f69e4c28b9d3531b4c92c623ee3b2bb615e5ad088655a626d45 Oct 06 22:59:13 crc kubenswrapper[5014]: I1006 22:59:13.484649 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 22:59:13 crc kubenswrapper[5014]: E1006 22:59:13.485225 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:59:13 crc kubenswrapper[5014]: I1006 22:59:13.566900 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:13 crc kubenswrapper[5014]: I1006 22:59:13.585922 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:13 crc kubenswrapper[5014]: I1006 22:59:13.646869 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Oct 06 22:59:13 crc kubenswrapper[5014]: I1006 22:59:13.651124 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:13 crc kubenswrapper[5014]: W1006 22:59:13.654465 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44f14dca_2c88_421b_8f41_302926b341bb.slice/crio-1a5023178d387d12bce36e1a3dc3b113cda3f6ec53d21b1971d012a053936e8a WatchSource:0}: Error finding container 1a5023178d387d12bce36e1a3dc3b113cda3f6ec53d21b1971d012a053936e8a: Status 404 returned error can't find the container with id 1a5023178d387d12bce36e1a3dc3b113cda3f6ec53d21b1971d012a053936e8a Oct 06 22:59:13 crc kubenswrapper[5014]: I1006 22:59:13.667156 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"3d7105fa-62d5-469e-b4f3-774c6b1a5a90","Type":"ContainerStarted","Data":"53a696dc0ce8f85f8f11a560e741705740c2279fd581e6dec365bd1f5a16e831"} Oct 06 22:59:13 crc kubenswrapper[5014]: I1006 22:59:13.667202 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"3d7105fa-62d5-469e-b4f3-774c6b1a5a90","Type":"ContainerStarted","Data":"d207beb9be554cc3ff519ccb22e4116f05d56192db7ccdec03a0afa48c01c085"} Oct 06 22:59:13 crc kubenswrapper[5014]: I1006 22:59:13.667217 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"3d7105fa-62d5-469e-b4f3-774c6b1a5a90","Type":"ContainerStarted","Data":"21ac0bdce91c3f69e4c28b9d3531b4c92c623ee3b2bb615e5ad088655a626d45"} Oct 06 22:59:13 crc kubenswrapper[5014]: I1006 22:59:13.670993 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"9916618a-700d-406e-b443-38423eafb158","Type":"ContainerStarted","Data":"3c440439ac76eec04a154976f8c17fa51df128a1752e80f6dbe07c168bab2960"} Oct 06 22:59:13 crc kubenswrapper[5014]: I1006 22:59:13.671040 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"9916618a-700d-406e-b443-38423eafb158","Type":"ContainerStarted","Data":"f3bab0a7577f6d1c558664839a9904c78ef76f02e782d35a117d1b0420291140"} Oct 06 22:59:13 crc kubenswrapper[5014]: I1006 22:59:13.671050 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"9916618a-700d-406e-b443-38423eafb158","Type":"ContainerStarted","Data":"8bef2beff90d963455b8a507c71cc5fbd80acea67565e75ce38e0bfc49041afd"} Oct 06 22:59:13 crc kubenswrapper[5014]: I1006 22:59:13.703056 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-1" 
podStartSLOduration=2.703036024 podStartE2EDuration="2.703036024s" podCreationTimestamp="2025-10-06 22:59:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:59:13.695187899 +0000 UTC m=+5298.988224643" watchObservedRunningTime="2025-10-06 22:59:13.703036024 +0000 UTC m=+5298.996072758" Oct 06 22:59:13 crc kubenswrapper[5014]: I1006 22:59:13.723284 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=2.7232659249999998 podStartE2EDuration="2.723265925s" podCreationTimestamp="2025-10-06 22:59:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:59:13.717940149 +0000 UTC m=+5299.010976903" watchObservedRunningTime="2025-10-06 22:59:13.723265925 +0000 UTC m=+5299.016302679" Oct 06 22:59:14 crc kubenswrapper[5014]: I1006 22:59:14.683898 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"44f14dca-2c88-421b-8f41-302926b341bb","Type":"ContainerStarted","Data":"77d02502ce34c1900932a4bd6cd5191fcc953cfad0aff38225e0be3c1fe7de58"} Oct 06 22:59:14 crc kubenswrapper[5014]: I1006 22:59:14.684376 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"44f14dca-2c88-421b-8f41-302926b341bb","Type":"ContainerStarted","Data":"d191fd74a9b42d97433fcde24029094d9ab04b211170786b29af6fa40df7d173"} Oct 06 22:59:14 crc kubenswrapper[5014]: I1006 22:59:14.684407 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"44f14dca-2c88-421b-8f41-302926b341bb","Type":"ContainerStarted","Data":"1a5023178d387d12bce36e1a3dc3b113cda3f6ec53d21b1971d012a053936e8a"} Oct 06 22:59:14 crc kubenswrapper[5014]: I1006 22:59:14.734299 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-2" podStartSLOduration=3.734274992 podStartE2EDuration="3.734274992s" podCreationTimestamp="2025-10-06 22:59:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:59:14.722128473 +0000 UTC m=+5300.015165237" watchObservedRunningTime="2025-10-06 22:59:14.734274992 +0000 UTC m=+5300.027311766" Oct 06 22:59:15 crc kubenswrapper[5014]: I1006 22:59:15.476770 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:15 crc kubenswrapper[5014]: I1006 22:59:15.506185 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:15 crc kubenswrapper[5014]: I1006 22:59:15.515672 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:15 crc kubenswrapper[5014]: I1006 22:59:15.567507 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:15 crc kubenswrapper[5014]: I1006 22:59:15.585763 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:15 crc kubenswrapper[5014]: I1006 22:59:15.651115 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:16 crc kubenswrapper[5014]: I1006 22:59:16.643092 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:16 crc kubenswrapper[5014]: I1006 22:59:16.666562 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:16 crc kubenswrapper[5014]: I1006 22:59:16.712489 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Oct 06 22:59:16 crc kubenswrapper[5014]: I1006 22:59:16.728217 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:16 crc kubenswrapper[5014]: I1006 22:59:16.782207 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-2" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.017366 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6444955555-nczwt"] Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.020681 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.029553 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.031051 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6444955555-nczwt"] Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.078487 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bv4j\" (UniqueName: \"kubernetes.io/projected/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-kube-api-access-2bv4j\") pod \"dnsmasq-dns-6444955555-nczwt\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.078529 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-dns-svc\") pod \"dnsmasq-dns-6444955555-nczwt\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.078629 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-ovsdbserver-nb\") pod \"dnsmasq-dns-6444955555-nczwt\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.078735 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-config\") pod \"dnsmasq-dns-6444955555-nczwt\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.179887 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-config\") pod \"dnsmasq-dns-6444955555-nczwt\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.179977 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-2bv4j\" (UniqueName: \"kubernetes.io/projected/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-kube-api-access-2bv4j\") pod \"dnsmasq-dns-6444955555-nczwt\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.180005 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-dns-svc\") pod \"dnsmasq-dns-6444955555-nczwt\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.180084 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-ovsdbserver-nb\") pod \"dnsmasq-dns-6444955555-nczwt\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.181130 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-config\") pod \"dnsmasq-dns-6444955555-nczwt\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.181153 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-dns-svc\") pod \"dnsmasq-dns-6444955555-nczwt\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.181184 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-ovsdbserver-nb\") pod \"dnsmasq-dns-6444955555-nczwt\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.205804 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bv4j\" (UniqueName: \"kubernetes.io/projected/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-kube-api-access-2bv4j\") pod \"dnsmasq-dns-6444955555-nczwt\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.349525 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.477307 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.510291 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.516310 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:17 crc kubenswrapper[5014]: I1006 22:59:17.930090 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6444955555-nczwt"] Oct 06 22:59:18 crc kubenswrapper[5014]: I1006 22:59:18.552155 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:18 crc kubenswrapper[5014]: I1006 22:59:18.558875 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:18 crc kubenswrapper[5014]: I1006 22:59:18.594733 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:18 crc kubenswrapper[5014]: I1006 22:59:18.623135 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Oct 06 22:59:18 crc kubenswrapper[5014]: I1006 22:59:18.642952 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-1" Oct 06 22:59:18 crc kubenswrapper[5014]: I1006 22:59:18.738833 5014 generic.go:334] "Generic (PLEG): container finished" podID="629cf9cb-4dad-4b6c-8bed-6842ee06b6bf" containerID="97a8bc1c8036f27799b2d0c215a1b273ff29e3c4882a9c922e7378e6fd6c724b" exitCode=0 Oct 06 22:59:18 crc kubenswrapper[5014]: I1006 22:59:18.739061 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6444955555-nczwt" event={"ID":"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf","Type":"ContainerDied","Data":"97a8bc1c8036f27799b2d0c215a1b273ff29e3c4882a9c922e7378e6fd6c724b"} Oct 06 22:59:18 crc kubenswrapper[5014]: I1006 22:59:18.741904 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6444955555-nczwt" event={"ID":"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf","Type":"ContainerStarted","Data":"ac47e17f87582121af946e521599e7bb5daca6c8e88bb4c1ff8ec6327dfca970"} Oct 06 22:59:18 crc kubenswrapper[5014]: I1006 22:59:18.862913 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-2" Oct 06 22:59:18 crc kubenswrapper[5014]: I1006 22:59:18.924728 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6444955555-nczwt"] Oct 06 22:59:18 crc kubenswrapper[5014]: I1006 22:59:18.964131 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-69864b896c-6crm5"] Oct 06 22:59:18 crc kubenswrapper[5014]: I1006 22:59:18.965971 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:18 crc kubenswrapper[5014]: I1006 22:59:18.969905 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Oct 06 22:59:18 crc kubenswrapper[5014]: I1006 22:59:18.977612 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69864b896c-6crm5"] Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.132778 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-config\") pod \"dnsmasq-dns-69864b896c-6crm5\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.132853 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-ovsdbserver-nb\") pod \"dnsmasq-dns-69864b896c-6crm5\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.132899 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-dns-svc\") pod \"dnsmasq-dns-69864b896c-6crm5\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.132920 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-ovsdbserver-sb\") pod \"dnsmasq-dns-69864b896c-6crm5\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.132952 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpc6c\" (UniqueName: \"kubernetes.io/projected/96fd7974-927c-499a-9221-b04af21765da-kube-api-access-qpc6c\") pod \"dnsmasq-dns-69864b896c-6crm5\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.234290 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-config\") pod \"dnsmasq-dns-69864b896c-6crm5\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.234439 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-ovsdbserver-nb\") pod \"dnsmasq-dns-69864b896c-6crm5\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.234524 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-dns-svc\") pod \"dnsmasq-dns-69864b896c-6crm5\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " 
pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.234557 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-ovsdbserver-sb\") pod \"dnsmasq-dns-69864b896c-6crm5\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.234606 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpc6c\" (UniqueName: \"kubernetes.io/projected/96fd7974-927c-499a-9221-b04af21765da-kube-api-access-qpc6c\") pod \"dnsmasq-dns-69864b896c-6crm5\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.235409 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-config\") pod \"dnsmasq-dns-69864b896c-6crm5\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.235867 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-dns-svc\") pod \"dnsmasq-dns-69864b896c-6crm5\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.235960 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-ovsdbserver-nb\") pod \"dnsmasq-dns-69864b896c-6crm5\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.236326 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-ovsdbserver-sb\") pod \"dnsmasq-dns-69864b896c-6crm5\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.258971 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpc6c\" (UniqueName: \"kubernetes.io/projected/96fd7974-927c-499a-9221-b04af21765da-kube-api-access-qpc6c\") pod \"dnsmasq-dns-69864b896c-6crm5\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.282880 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.754763 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69864b896c-6crm5"] Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.757478 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6444955555-nczwt" event={"ID":"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf","Type":"ContainerStarted","Data":"5989e327d7d097398d98b426e8c50b5585269d15c36fca091350e3d46bf93f96"} Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.757743 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6444955555-nczwt" podUID="629cf9cb-4dad-4b6c-8bed-6842ee06b6bf" containerName="dnsmasq-dns" containerID="cri-o://5989e327d7d097398d98b426e8c50b5585269d15c36fca091350e3d46bf93f96" gracePeriod=10 Oct 06 22:59:19 crc kubenswrapper[5014]: W1006 22:59:19.767760 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod96fd7974_927c_499a_9221_b04af21765da.slice/crio-a1b2d97e8d09dcc8ae5f4570bcc529f747b07207291e569dbb907d40a889fcc5 WatchSource:0}: Error finding container a1b2d97e8d09dcc8ae5f4570bcc529f747b07207291e569dbb907d40a889fcc5: Status 404 returned error can't find the container with id a1b2d97e8d09dcc8ae5f4570bcc529f747b07207291e569dbb907d40a889fcc5 Oct 06 22:59:19 crc kubenswrapper[5014]: I1006 22:59:19.779930 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6444955555-nczwt" podStartSLOduration=3.779916554 podStartE2EDuration="3.779916554s" podCreationTimestamp="2025-10-06 22:59:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:59:19.77853335 +0000 UTC m=+5305.071570084" watchObservedRunningTime="2025-10-06 22:59:19.779916554 +0000 UTC m=+5305.072953278" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.160340 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.253579 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bv4j\" (UniqueName: \"kubernetes.io/projected/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-kube-api-access-2bv4j\") pod \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.253665 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-ovsdbserver-nb\") pod \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.253725 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-config\") pod \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.253754 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-dns-svc\") pod \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\" (UID: \"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf\") " Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.261025 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-kube-api-access-2bv4j" (OuterVolumeSpecName: "kube-api-access-2bv4j") pod "629cf9cb-4dad-4b6c-8bed-6842ee06b6bf" (UID: "629cf9cb-4dad-4b6c-8bed-6842ee06b6bf"). InnerVolumeSpecName "kube-api-access-2bv4j". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.309200 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "629cf9cb-4dad-4b6c-8bed-6842ee06b6bf" (UID: "629cf9cb-4dad-4b6c-8bed-6842ee06b6bf"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.312047 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "629cf9cb-4dad-4b6c-8bed-6842ee06b6bf" (UID: "629cf9cb-4dad-4b6c-8bed-6842ee06b6bf"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.329438 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-config" (OuterVolumeSpecName: "config") pod "629cf9cb-4dad-4b6c-8bed-6842ee06b6bf" (UID: "629cf9cb-4dad-4b6c-8bed-6842ee06b6bf"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.355996 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.356027 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-config\") on node \"crc\" DevicePath \"\"" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.356037 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.356047 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bv4j\" (UniqueName: \"kubernetes.io/projected/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf-kube-api-access-2bv4j\") on node \"crc\" DevicePath \"\"" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.634054 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-1" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.770398 5014 generic.go:334] "Generic (PLEG): container finished" podID="96fd7974-927c-499a-9221-b04af21765da" containerID="3a60fe28ecc7347e9d6ffd72c014a662a524d00cad34dcf4fcde41061f68d4a2" exitCode=0 Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.770527 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69864b896c-6crm5" event={"ID":"96fd7974-927c-499a-9221-b04af21765da","Type":"ContainerDied","Data":"3a60fe28ecc7347e9d6ffd72c014a662a524d00cad34dcf4fcde41061f68d4a2"} Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.770775 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69864b896c-6crm5" event={"ID":"96fd7974-927c-499a-9221-b04af21765da","Type":"ContainerStarted","Data":"a1b2d97e8d09dcc8ae5f4570bcc529f747b07207291e569dbb907d40a889fcc5"} Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.775026 5014 generic.go:334] "Generic (PLEG): container finished" podID="629cf9cb-4dad-4b6c-8bed-6842ee06b6bf" containerID="5989e327d7d097398d98b426e8c50b5585269d15c36fca091350e3d46bf93f96" exitCode=0 Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.775060 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6444955555-nczwt" event={"ID":"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf","Type":"ContainerDied","Data":"5989e327d7d097398d98b426e8c50b5585269d15c36fca091350e3d46bf93f96"} Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.775105 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6444955555-nczwt" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.775152 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6444955555-nczwt" event={"ID":"629cf9cb-4dad-4b6c-8bed-6842ee06b6bf","Type":"ContainerDied","Data":"ac47e17f87582121af946e521599e7bb5daca6c8e88bb4c1ff8ec6327dfca970"} Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.775189 5014 scope.go:117] "RemoveContainer" containerID="5989e327d7d097398d98b426e8c50b5585269d15c36fca091350e3d46bf93f96" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.899363 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6444955555-nczwt"] Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.902977 5014 scope.go:117] "RemoveContainer" containerID="97a8bc1c8036f27799b2d0c215a1b273ff29e3c4882a9c922e7378e6fd6c724b" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.904977 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6444955555-nczwt"] Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.937948 5014 scope.go:117] "RemoveContainer" containerID="5989e327d7d097398d98b426e8c50b5585269d15c36fca091350e3d46bf93f96" Oct 06 22:59:20 crc kubenswrapper[5014]: E1006 22:59:20.939118 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5989e327d7d097398d98b426e8c50b5585269d15c36fca091350e3d46bf93f96\": container with ID starting with 5989e327d7d097398d98b426e8c50b5585269d15c36fca091350e3d46bf93f96 not found: ID does not exist" containerID="5989e327d7d097398d98b426e8c50b5585269d15c36fca091350e3d46bf93f96" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.939166 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5989e327d7d097398d98b426e8c50b5585269d15c36fca091350e3d46bf93f96"} err="failed to get container status \"5989e327d7d097398d98b426e8c50b5585269d15c36fca091350e3d46bf93f96\": rpc error: code = NotFound desc = could not find container \"5989e327d7d097398d98b426e8c50b5585269d15c36fca091350e3d46bf93f96\": container with ID starting with 5989e327d7d097398d98b426e8c50b5585269d15c36fca091350e3d46bf93f96 not found: ID does not exist" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.939203 5014 scope.go:117] "RemoveContainer" containerID="97a8bc1c8036f27799b2d0c215a1b273ff29e3c4882a9c922e7378e6fd6c724b" Oct 06 22:59:20 crc kubenswrapper[5014]: E1006 22:59:20.939763 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97a8bc1c8036f27799b2d0c215a1b273ff29e3c4882a9c922e7378e6fd6c724b\": container with ID starting with 97a8bc1c8036f27799b2d0c215a1b273ff29e3c4882a9c922e7378e6fd6c724b not found: ID does not exist" containerID="97a8bc1c8036f27799b2d0c215a1b273ff29e3c4882a9c922e7378e6fd6c724b" Oct 06 22:59:20 crc kubenswrapper[5014]: I1006 22:59:20.939804 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97a8bc1c8036f27799b2d0c215a1b273ff29e3c4882a9c922e7378e6fd6c724b"} err="failed to get container status \"97a8bc1c8036f27799b2d0c215a1b273ff29e3c4882a9c922e7378e6fd6c724b\": rpc error: code = NotFound desc = could not find container \"97a8bc1c8036f27799b2d0c215a1b273ff29e3c4882a9c922e7378e6fd6c724b\": container with ID starting with 97a8bc1c8036f27799b2d0c215a1b273ff29e3c4882a9c922e7378e6fd6c724b not found: ID does not exist" Oct 06 
22:59:21 crc kubenswrapper[5014]: I1006 22:59:21.503066 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="629cf9cb-4dad-4b6c-8bed-6842ee06b6bf" path="/var/lib/kubelet/pods/629cf9cb-4dad-4b6c-8bed-6842ee06b6bf/volumes" Oct 06 22:59:21 crc kubenswrapper[5014]: I1006 22:59:21.790097 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69864b896c-6crm5" event={"ID":"96fd7974-927c-499a-9221-b04af21765da","Type":"ContainerStarted","Data":"357466a4a53aa038aef3c14d1106ae1f16c32b204c7e61bfdc715636d04266c0"} Oct 06 22:59:21 crc kubenswrapper[5014]: I1006 22:59:21.790263 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 22:59:21 crc kubenswrapper[5014]: I1006 22:59:21.826526 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-69864b896c-6crm5" podStartSLOduration=3.826498714 podStartE2EDuration="3.826498714s" podCreationTimestamp="2025-10-06 22:59:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:59:21.822405016 +0000 UTC m=+5307.115441800" watchObservedRunningTime="2025-10-06 22:59:21.826498714 +0000 UTC m=+5307.119535488" Oct 06 22:59:23 crc kubenswrapper[5014]: I1006 22:59:23.942350 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-copy-data"] Oct 06 22:59:23 crc kubenswrapper[5014]: E1006 22:59:23.945485 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="629cf9cb-4dad-4b6c-8bed-6842ee06b6bf" containerName="init" Oct 06 22:59:23 crc kubenswrapper[5014]: I1006 22:59:23.945782 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="629cf9cb-4dad-4b6c-8bed-6842ee06b6bf" containerName="init" Oct 06 22:59:23 crc kubenswrapper[5014]: E1006 22:59:23.945971 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="629cf9cb-4dad-4b6c-8bed-6842ee06b6bf" containerName="dnsmasq-dns" Oct 06 22:59:23 crc kubenswrapper[5014]: I1006 22:59:23.946304 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="629cf9cb-4dad-4b6c-8bed-6842ee06b6bf" containerName="dnsmasq-dns" Oct 06 22:59:23 crc kubenswrapper[5014]: I1006 22:59:23.947609 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="629cf9cb-4dad-4b6c-8bed-6842ee06b6bf" containerName="dnsmasq-dns" Oct 06 22:59:23 crc kubenswrapper[5014]: I1006 22:59:23.949722 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Oct 06 22:59:23 crc kubenswrapper[5014]: I1006 22:59:23.949921 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Oct 06 22:59:23 crc kubenswrapper[5014]: I1006 22:59:23.953200 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovn-data-cert" Oct 06 22:59:24 crc kubenswrapper[5014]: I1006 22:59:24.026737 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lsjc\" (UniqueName: \"kubernetes.io/projected/598e320b-1fec-436c-92fa-2dcc28318950-kube-api-access-2lsjc\") pod \"ovn-copy-data\" (UID: \"598e320b-1fec-436c-92fa-2dcc28318950\") " pod="openstack/ovn-copy-data" Oct 06 22:59:24 crc kubenswrapper[5014]: I1006 22:59:24.027013 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-94047f1c-c85b-43ef-810e-3bc014230341\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-94047f1c-c85b-43ef-810e-3bc014230341\") pod \"ovn-copy-data\" (UID: \"598e320b-1fec-436c-92fa-2dcc28318950\") " pod="openstack/ovn-copy-data" Oct 06 22:59:24 crc kubenswrapper[5014]: I1006 22:59:24.027071 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/598e320b-1fec-436c-92fa-2dcc28318950-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"598e320b-1fec-436c-92fa-2dcc28318950\") " pod="openstack/ovn-copy-data" Oct 06 22:59:24 crc kubenswrapper[5014]: I1006 22:59:24.129045 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lsjc\" (UniqueName: \"kubernetes.io/projected/598e320b-1fec-436c-92fa-2dcc28318950-kube-api-access-2lsjc\") pod \"ovn-copy-data\" (UID: \"598e320b-1fec-436c-92fa-2dcc28318950\") " pod="openstack/ovn-copy-data" Oct 06 22:59:24 crc kubenswrapper[5014]: I1006 22:59:24.129582 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-94047f1c-c85b-43ef-810e-3bc014230341\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-94047f1c-c85b-43ef-810e-3bc014230341\") pod \"ovn-copy-data\" (UID: \"598e320b-1fec-436c-92fa-2dcc28318950\") " pod="openstack/ovn-copy-data" Oct 06 22:59:24 crc kubenswrapper[5014]: I1006 22:59:24.129787 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/598e320b-1fec-436c-92fa-2dcc28318950-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"598e320b-1fec-436c-92fa-2dcc28318950\") " pod="openstack/ovn-copy-data" Oct 06 22:59:24 crc kubenswrapper[5014]: I1006 22:59:24.134021 5014 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Oct 06 22:59:24 crc kubenswrapper[5014]: I1006 22:59:24.134127 5014 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-94047f1c-c85b-43ef-810e-3bc014230341\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-94047f1c-c85b-43ef-810e-3bc014230341\") pod \"ovn-copy-data\" (UID: \"598e320b-1fec-436c-92fa-2dcc28318950\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ce19b13b492afa350843e7d612a6dd32de4731be8887e5aa1239c230a0a46dc3/globalmount\"" pod="openstack/ovn-copy-data"
Oct 06 22:59:24 crc kubenswrapper[5014]: I1006 22:59:24.140729 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/598e320b-1fec-436c-92fa-2dcc28318950-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"598e320b-1fec-436c-92fa-2dcc28318950\") " pod="openstack/ovn-copy-data"
Oct 06 22:59:24 crc kubenswrapper[5014]: I1006 22:59:24.158566 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lsjc\" (UniqueName: \"kubernetes.io/projected/598e320b-1fec-436c-92fa-2dcc28318950-kube-api-access-2lsjc\") pod \"ovn-copy-data\" (UID: \"598e320b-1fec-436c-92fa-2dcc28318950\") " pod="openstack/ovn-copy-data"
Oct 06 22:59:24 crc kubenswrapper[5014]: I1006 22:59:24.185336 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-94047f1c-c85b-43ef-810e-3bc014230341\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-94047f1c-c85b-43ef-810e-3bc014230341\") pod \"ovn-copy-data\" (UID: \"598e320b-1fec-436c-92fa-2dcc28318950\") " pod="openstack/ovn-copy-data"
Oct 06 22:59:24 crc kubenswrapper[5014]: I1006 22:59:24.277099 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data"
Oct 06 22:59:24 crc kubenswrapper[5014]: I1006 22:59:24.796430 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"]
Oct 06 22:59:24 crc kubenswrapper[5014]: I1006 22:59:24.832685 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"598e320b-1fec-436c-92fa-2dcc28318950","Type":"ContainerStarted","Data":"cf85ce950c78d8a43a02c0cb5c08b4e72673ed21c70aa636c12f6c4c1e084310"}
Oct 06 22:59:25 crc kubenswrapper[5014]: I1006 22:59:25.844459 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"598e320b-1fec-436c-92fa-2dcc28318950","Type":"ContainerStarted","Data":"fbf3922409c8d141a6a08fb2eda85e23726163f4a8fa1937d91ec431ba22bb06"}
Oct 06 22:59:25 crc kubenswrapper[5014]: I1006 22:59:25.872258 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-copy-data" podStartSLOduration=3.3280872759999998 podStartE2EDuration="3.872228615s" podCreationTimestamp="2025-10-06 22:59:22 +0000 UTC" firstStartedPulling="2025-10-06 22:59:24.806895213 +0000 UTC m=+5310.099931957" lastFinishedPulling="2025-10-06 22:59:25.351036512 +0000 UTC m=+5310.644073296" observedRunningTime="2025-10-06 22:59:25.868546469 +0000 UTC m=+5311.161583283" watchObservedRunningTime="2025-10-06 22:59:25.872228615 +0000 UTC m=+5311.165265389"
Oct 06 22:59:28 crc kubenswrapper[5014]: I1006 22:59:28.485048 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016"
Oct 06 22:59:28 crc kubenswrapper[5014]: E1006 22:59:28.486330 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.285922 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-69864b896c-6crm5"
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.361166 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fdc957c47-knrqp"]
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.361465 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" podUID="0c45fd94-0ec2-4683-a66b-aef65f906ca5" containerName="dnsmasq-dns" containerID="cri-o://5bf86b5cf828a134f35eab1b7203f0bd99ab1424df9e70da39f698064d6ffa0a" gracePeriod=10
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.862942 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fdc957c47-knrqp"
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.897368 5014 generic.go:334] "Generic (PLEG): container finished" podID="0c45fd94-0ec2-4683-a66b-aef65f906ca5" containerID="5bf86b5cf828a134f35eab1b7203f0bd99ab1424df9e70da39f698064d6ffa0a" exitCode=0
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.897430 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" event={"ID":"0c45fd94-0ec2-4683-a66b-aef65f906ca5","Type":"ContainerDied","Data":"5bf86b5cf828a134f35eab1b7203f0bd99ab1424df9e70da39f698064d6ffa0a"}
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.897440 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fdc957c47-knrqp"
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.897467 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fdc957c47-knrqp" event={"ID":"0c45fd94-0ec2-4683-a66b-aef65f906ca5","Type":"ContainerDied","Data":"0245d83e5db0d5028a5352b4236d4ddfca70668d5e8f7716f9ae489d82b1ced3"}
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.897490 5014 scope.go:117] "RemoveContainer" containerID="5bf86b5cf828a134f35eab1b7203f0bd99ab1424df9e70da39f698064d6ffa0a"
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.940213 5014 scope.go:117] "RemoveContainer" containerID="a5320a55f37ebf1e8db77bf25880f6585e8867d1bd558b5063a580cb49ebdc7e"
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.966837 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7qxrn\" (UniqueName: \"kubernetes.io/projected/0c45fd94-0ec2-4683-a66b-aef65f906ca5-kube-api-access-7qxrn\") pod \"0c45fd94-0ec2-4683-a66b-aef65f906ca5\" (UID: \"0c45fd94-0ec2-4683-a66b-aef65f906ca5\") "
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.966930 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c45fd94-0ec2-4683-a66b-aef65f906ca5-config\") pod \"0c45fd94-0ec2-4683-a66b-aef65f906ca5\" (UID: \"0c45fd94-0ec2-4683-a66b-aef65f906ca5\") "
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.966976 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0c45fd94-0ec2-4683-a66b-aef65f906ca5-dns-svc\") pod \"0c45fd94-0ec2-4683-a66b-aef65f906ca5\" (UID: \"0c45fd94-0ec2-4683-a66b-aef65f906ca5\") "
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.974665 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c45fd94-0ec2-4683-a66b-aef65f906ca5-kube-api-access-7qxrn" (OuterVolumeSpecName: "kube-api-access-7qxrn") pod "0c45fd94-0ec2-4683-a66b-aef65f906ca5" (UID: "0c45fd94-0ec2-4683-a66b-aef65f906ca5"). InnerVolumeSpecName "kube-api-access-7qxrn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.976049 5014 scope.go:117] "RemoveContainer" containerID="5bf86b5cf828a134f35eab1b7203f0bd99ab1424df9e70da39f698064d6ffa0a"
Oct 06 22:59:29 crc kubenswrapper[5014]: E1006 22:59:29.976530 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bf86b5cf828a134f35eab1b7203f0bd99ab1424df9e70da39f698064d6ffa0a\": container with ID starting with 5bf86b5cf828a134f35eab1b7203f0bd99ab1424df9e70da39f698064d6ffa0a not found: ID does not exist" containerID="5bf86b5cf828a134f35eab1b7203f0bd99ab1424df9e70da39f698064d6ffa0a"
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.976604 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bf86b5cf828a134f35eab1b7203f0bd99ab1424df9e70da39f698064d6ffa0a"} err="failed to get container status \"5bf86b5cf828a134f35eab1b7203f0bd99ab1424df9e70da39f698064d6ffa0a\": rpc error: code = NotFound desc = could not find container \"5bf86b5cf828a134f35eab1b7203f0bd99ab1424df9e70da39f698064d6ffa0a\": container with ID starting with 5bf86b5cf828a134f35eab1b7203f0bd99ab1424df9e70da39f698064d6ffa0a not found: ID does not exist"
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.976654 5014 scope.go:117] "RemoveContainer" containerID="a5320a55f37ebf1e8db77bf25880f6585e8867d1bd558b5063a580cb49ebdc7e"
Oct 06 22:59:29 crc kubenswrapper[5014]: E1006 22:59:29.976921 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5320a55f37ebf1e8db77bf25880f6585e8867d1bd558b5063a580cb49ebdc7e\": container with ID starting with a5320a55f37ebf1e8db77bf25880f6585e8867d1bd558b5063a580cb49ebdc7e not found: ID does not exist" containerID="a5320a55f37ebf1e8db77bf25880f6585e8867d1bd558b5063a580cb49ebdc7e"
Oct 06 22:59:29 crc kubenswrapper[5014]: I1006 22:59:29.976942 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5320a55f37ebf1e8db77bf25880f6585e8867d1bd558b5063a580cb49ebdc7e"} err="failed to get container status \"a5320a55f37ebf1e8db77bf25880f6585e8867d1bd558b5063a580cb49ebdc7e\": rpc error: code = NotFound desc = could not find container \"a5320a55f37ebf1e8db77bf25880f6585e8867d1bd558b5063a580cb49ebdc7e\": container with ID starting with a5320a55f37ebf1e8db77bf25880f6585e8867d1bd558b5063a580cb49ebdc7e not found: ID does not exist"
Oct 06 22:59:30 crc kubenswrapper[5014]: I1006 22:59:30.020513 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c45fd94-0ec2-4683-a66b-aef65f906ca5-config" (OuterVolumeSpecName: "config") pod "0c45fd94-0ec2-4683-a66b-aef65f906ca5" (UID: "0c45fd94-0ec2-4683-a66b-aef65f906ca5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 22:59:30 crc kubenswrapper[5014]: I1006 22:59:30.023725 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c45fd94-0ec2-4683-a66b-aef65f906ca5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0c45fd94-0ec2-4683-a66b-aef65f906ca5" (UID: "0c45fd94-0ec2-4683-a66b-aef65f906ca5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 06 22:59:30 crc kubenswrapper[5014]: I1006 22:59:30.068537 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7qxrn\" (UniqueName: \"kubernetes.io/projected/0c45fd94-0ec2-4683-a66b-aef65f906ca5-kube-api-access-7qxrn\") on node \"crc\" DevicePath \"\""
Oct 06 22:59:30 crc kubenswrapper[5014]: I1006 22:59:30.068567 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c45fd94-0ec2-4683-a66b-aef65f906ca5-config\") on node \"crc\" DevicePath \"\""
Oct 06 22:59:30 crc kubenswrapper[5014]: I1006 22:59:30.068575 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0c45fd94-0ec2-4683-a66b-aef65f906ca5-dns-svc\") on node \"crc\" DevicePath \"\""
Oct 06 22:59:30 crc kubenswrapper[5014]: I1006 22:59:30.231446 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fdc957c47-knrqp"]
Oct 06 22:59:30 crc kubenswrapper[5014]: I1006 22:59:30.236203 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5fdc957c47-knrqp"]
Oct 06 22:59:31 crc kubenswrapper[5014]: I1006 22:59:31.504772 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c45fd94-0ec2-4683-a66b-aef65f906ca5" path="/var/lib/kubelet/pods/0c45fd94-0ec2-4683-a66b-aef65f906ca5/volumes"
Oct 06 22:59:31 crc kubenswrapper[5014]: I1006 22:59:31.895466 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Oct 06 22:59:31 crc kubenswrapper[5014]: E1006 22:59:31.896567 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c45fd94-0ec2-4683-a66b-aef65f906ca5" containerName="init"
Oct 06 22:59:31 crc kubenswrapper[5014]: I1006 22:59:31.896593 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c45fd94-0ec2-4683-a66b-aef65f906ca5" containerName="init"
Oct 06 22:59:31 crc kubenswrapper[5014]: E1006 22:59:31.896655 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c45fd94-0ec2-4683-a66b-aef65f906ca5" containerName="dnsmasq-dns"
Oct 06 22:59:31 crc kubenswrapper[5014]: I1006 22:59:31.896667 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c45fd94-0ec2-4683-a66b-aef65f906ca5" containerName="dnsmasq-dns"
Oct 06 22:59:31 crc kubenswrapper[5014]: I1006 22:59:31.896856 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c45fd94-0ec2-4683-a66b-aef65f906ca5" containerName="dnsmasq-dns"
Oct 06 22:59:31 crc kubenswrapper[5014]: I1006 22:59:31.899108 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Need to start a new one" pod="openstack/ovn-northd-0" Oct 06 22:59:31 crc kubenswrapper[5014]: I1006 22:59:31.910350 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Oct 06 22:59:31 crc kubenswrapper[5014]: I1006 22:59:31.910560 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Oct 06 22:59:31 crc kubenswrapper[5014]: I1006 22:59:31.910911 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-758bs" Oct 06 22:59:31 crc kubenswrapper[5014]: I1006 22:59:31.910980 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Oct 06 22:59:31 crc kubenswrapper[5014]: I1006 22:59:31.922335 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.002540 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f255b6d-1bad-47db-8049-e388bcfbf98c-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.002674 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbnv5\" (UniqueName: \"kubernetes.io/projected/7f255b6d-1bad-47db-8049-e388bcfbf98c-kube-api-access-gbnv5\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.002715 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f255b6d-1bad-47db-8049-e388bcfbf98c-config\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.002938 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f255b6d-1bad-47db-8049-e388bcfbf98c-scripts\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.002979 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f255b6d-1bad-47db-8049-e388bcfbf98c-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.003021 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f255b6d-1bad-47db-8049-e388bcfbf98c-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.003050 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7f255b6d-1bad-47db-8049-e388bcfbf98c-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: 
I1006 22:59:32.105129 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f255b6d-1bad-47db-8049-e388bcfbf98c-scripts\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.105206 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f255b6d-1bad-47db-8049-e388bcfbf98c-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.105253 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f255b6d-1bad-47db-8049-e388bcfbf98c-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.105283 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7f255b6d-1bad-47db-8049-e388bcfbf98c-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.105325 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f255b6d-1bad-47db-8049-e388bcfbf98c-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.105359 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbnv5\" (UniqueName: \"kubernetes.io/projected/7f255b6d-1bad-47db-8049-e388bcfbf98c-kube-api-access-gbnv5\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.105385 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f255b6d-1bad-47db-8049-e388bcfbf98c-config\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.106866 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7f255b6d-1bad-47db-8049-e388bcfbf98c-scripts\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.106900 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f255b6d-1bad-47db-8049-e388bcfbf98c-config\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.108200 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7f255b6d-1bad-47db-8049-e388bcfbf98c-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.113126 5014 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f255b6d-1bad-47db-8049-e388bcfbf98c-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.116854 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7f255b6d-1bad-47db-8049-e388bcfbf98c-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.119658 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f255b6d-1bad-47db-8049-e388bcfbf98c-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.135393 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbnv5\" (UniqueName: \"kubernetes.io/projected/7f255b6d-1bad-47db-8049-e388bcfbf98c-kube-api-access-gbnv5\") pod \"ovn-northd-0\" (UID: \"7f255b6d-1bad-47db-8049-e388bcfbf98c\") " pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.225658 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.720924 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Oct 06 22:59:32 crc kubenswrapper[5014]: W1006 22:59:32.731452 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7f255b6d_1bad_47db_8049_e388bcfbf98c.slice/crio-928ce3ec7bbf04c64b03db387c8298d3b9ed77e1a66b3be630df662516577e45 WatchSource:0}: Error finding container 928ce3ec7bbf04c64b03db387c8298d3b9ed77e1a66b3be630df662516577e45: Status 404 returned error can't find the container with id 928ce3ec7bbf04c64b03db387c8298d3b9ed77e1a66b3be630df662516577e45 Oct 06 22:59:32 crc kubenswrapper[5014]: I1006 22:59:32.937304 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"7f255b6d-1bad-47db-8049-e388bcfbf98c","Type":"ContainerStarted","Data":"928ce3ec7bbf04c64b03db387c8298d3b9ed77e1a66b3be630df662516577e45"} Oct 06 22:59:33 crc kubenswrapper[5014]: I1006 22:59:33.949864 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"7f255b6d-1bad-47db-8049-e388bcfbf98c","Type":"ContainerStarted","Data":"067fe26b605301dc6a63f57ab9d805889fbad1734913abd4eee101309bf2c1f5"} Oct 06 22:59:33 crc kubenswrapper[5014]: I1006 22:59:33.950383 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Oct 06 22:59:33 crc kubenswrapper[5014]: I1006 22:59:33.950396 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"7f255b6d-1bad-47db-8049-e388bcfbf98c","Type":"ContainerStarted","Data":"2a75affa423fccd9716f75501d5c012b00c544912e0d94ccff1bcfe8070a11b4"} Oct 06 22:59:33 crc kubenswrapper[5014]: I1006 22:59:33.971517 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.9714935110000003 podStartE2EDuration="2.971493511s" 
podCreationTimestamp="2025-10-06 22:59:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:59:33.970060586 +0000 UTC m=+5319.263097360" watchObservedRunningTime="2025-10-06 22:59:33.971493511 +0000 UTC m=+5319.264530285" Oct 06 22:59:37 crc kubenswrapper[5014]: I1006 22:59:37.785731 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-n2tx5"] Oct 06 22:59:37 crc kubenswrapper[5014]: I1006 22:59:37.787792 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-n2tx5" Oct 06 22:59:37 crc kubenswrapper[5014]: I1006 22:59:37.795474 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-n2tx5"] Oct 06 22:59:37 crc kubenswrapper[5014]: I1006 22:59:37.911770 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxqt5\" (UniqueName: \"kubernetes.io/projected/ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4-kube-api-access-jxqt5\") pod \"keystone-db-create-n2tx5\" (UID: \"ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4\") " pod="openstack/keystone-db-create-n2tx5" Oct 06 22:59:38 crc kubenswrapper[5014]: I1006 22:59:38.013991 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxqt5\" (UniqueName: \"kubernetes.io/projected/ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4-kube-api-access-jxqt5\") pod \"keystone-db-create-n2tx5\" (UID: \"ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4\") " pod="openstack/keystone-db-create-n2tx5" Oct 06 22:59:38 crc kubenswrapper[5014]: I1006 22:59:38.045864 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxqt5\" (UniqueName: \"kubernetes.io/projected/ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4-kube-api-access-jxqt5\") pod \"keystone-db-create-n2tx5\" (UID: \"ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4\") " pod="openstack/keystone-db-create-n2tx5" Oct 06 22:59:38 crc kubenswrapper[5014]: I1006 22:59:38.116692 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-n2tx5" Oct 06 22:59:38 crc kubenswrapper[5014]: I1006 22:59:38.566788 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-n2tx5"] Oct 06 22:59:38 crc kubenswrapper[5014]: W1006 22:59:38.575967 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podceb0fb2f_29c1_4540_9ea2_55333e9a3bb4.slice/crio-d8a882e660c87f1ab907f123499d6e08eb31cf0364c4e66623551d8ea7d320a7 WatchSource:0}: Error finding container d8a882e660c87f1ab907f123499d6e08eb31cf0364c4e66623551d8ea7d320a7: Status 404 returned error can't find the container with id d8a882e660c87f1ab907f123499d6e08eb31cf0364c4e66623551d8ea7d320a7 Oct 06 22:59:39 crc kubenswrapper[5014]: I1006 22:59:38.999865 5014 generic.go:334] "Generic (PLEG): container finished" podID="ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4" containerID="d7a88f93d3d4e108fbc1c5ffa2ef3113212acd44a8b4989c9615c28e069e4636" exitCode=0 Oct 06 22:59:39 crc kubenswrapper[5014]: I1006 22:59:38.999943 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-n2tx5" event={"ID":"ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4","Type":"ContainerDied","Data":"d7a88f93d3d4e108fbc1c5ffa2ef3113212acd44a8b4989c9615c28e069e4636"} Oct 06 22:59:39 crc kubenswrapper[5014]: I1006 22:59:39.000225 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-n2tx5" event={"ID":"ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4","Type":"ContainerStarted","Data":"d8a882e660c87f1ab907f123499d6e08eb31cf0364c4e66623551d8ea7d320a7"} Oct 06 22:59:40 crc kubenswrapper[5014]: I1006 22:59:40.437637 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-n2tx5" Oct 06 22:59:40 crc kubenswrapper[5014]: I1006 22:59:40.471640 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxqt5\" (UniqueName: \"kubernetes.io/projected/ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4-kube-api-access-jxqt5\") pod \"ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4\" (UID: \"ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4\") " Oct 06 22:59:40 crc kubenswrapper[5014]: I1006 22:59:40.479815 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4-kube-api-access-jxqt5" (OuterVolumeSpecName: "kube-api-access-jxqt5") pod "ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4" (UID: "ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4"). InnerVolumeSpecName "kube-api-access-jxqt5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:59:40 crc kubenswrapper[5014]: I1006 22:59:40.574461 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxqt5\" (UniqueName: \"kubernetes.io/projected/ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4-kube-api-access-jxqt5\") on node \"crc\" DevicePath \"\"" Oct 06 22:59:41 crc kubenswrapper[5014]: I1006 22:59:41.025807 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-n2tx5" event={"ID":"ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4","Type":"ContainerDied","Data":"d8a882e660c87f1ab907f123499d6e08eb31cf0364c4e66623551d8ea7d320a7"} Oct 06 22:59:41 crc kubenswrapper[5014]: I1006 22:59:41.025855 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d8a882e660c87f1ab907f123499d6e08eb31cf0364c4e66623551d8ea7d320a7" Oct 06 22:59:41 crc kubenswrapper[5014]: I1006 22:59:41.025886 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-n2tx5" Oct 06 22:59:42 crc kubenswrapper[5014]: I1006 22:59:42.318257 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Oct 06 22:59:43 crc kubenswrapper[5014]: I1006 22:59:43.484910 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 22:59:43 crc kubenswrapper[5014]: E1006 22:59:43.485143 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:59:47 crc kubenswrapper[5014]: I1006 22:59:47.934395 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6bfb-account-create-wptvp"] Oct 06 22:59:47 crc kubenswrapper[5014]: E1006 22:59:47.935264 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4" containerName="mariadb-database-create" Oct 06 22:59:47 crc kubenswrapper[5014]: I1006 22:59:47.935279 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4" containerName="mariadb-database-create" Oct 06 22:59:47 crc kubenswrapper[5014]: I1006 22:59:47.935492 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4" containerName="mariadb-database-create" Oct 06 22:59:47 crc kubenswrapper[5014]: I1006 22:59:47.936151 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6bfb-account-create-wptvp" Oct 06 22:59:47 crc kubenswrapper[5014]: I1006 22:59:47.938641 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Oct 06 22:59:48 crc kubenswrapper[5014]: I1006 22:59:48.010968 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bl6sk\" (UniqueName: \"kubernetes.io/projected/592b407e-657b-4204-af83-d4fe4508ee7e-kube-api-access-bl6sk\") pod \"keystone-6bfb-account-create-wptvp\" (UID: \"592b407e-657b-4204-af83-d4fe4508ee7e\") " pod="openstack/keystone-6bfb-account-create-wptvp" Oct 06 22:59:48 crc kubenswrapper[5014]: I1006 22:59:48.011029 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6bfb-account-create-wptvp"] Oct 06 22:59:48 crc kubenswrapper[5014]: I1006 22:59:48.112835 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bl6sk\" (UniqueName: \"kubernetes.io/projected/592b407e-657b-4204-af83-d4fe4508ee7e-kube-api-access-bl6sk\") pod \"keystone-6bfb-account-create-wptvp\" (UID: \"592b407e-657b-4204-af83-d4fe4508ee7e\") " pod="openstack/keystone-6bfb-account-create-wptvp" Oct 06 22:59:48 crc kubenswrapper[5014]: I1006 22:59:48.133313 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bl6sk\" (UniqueName: \"kubernetes.io/projected/592b407e-657b-4204-af83-d4fe4508ee7e-kube-api-access-bl6sk\") pod \"keystone-6bfb-account-create-wptvp\" (UID: \"592b407e-657b-4204-af83-d4fe4508ee7e\") " pod="openstack/keystone-6bfb-account-create-wptvp" Oct 06 22:59:48 crc kubenswrapper[5014]: I1006 22:59:48.264799 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6bfb-account-create-wptvp" Oct 06 22:59:48 crc kubenswrapper[5014]: I1006 22:59:48.729749 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6bfb-account-create-wptvp"] Oct 06 22:59:48 crc kubenswrapper[5014]: W1006 22:59:48.734084 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod592b407e_657b_4204_af83_d4fe4508ee7e.slice/crio-4fe2132f2b00516c60d95f15fcea391a2b3ed66957e6cc6b512e8508971504d8 WatchSource:0}: Error finding container 4fe2132f2b00516c60d95f15fcea391a2b3ed66957e6cc6b512e8508971504d8: Status 404 returned error can't find the container with id 4fe2132f2b00516c60d95f15fcea391a2b3ed66957e6cc6b512e8508971504d8 Oct 06 22:59:49 crc kubenswrapper[5014]: I1006 22:59:49.111998 5014 generic.go:334] "Generic (PLEG): container finished" podID="592b407e-657b-4204-af83-d4fe4508ee7e" containerID="1addd1103523de258e0c26388b5b4aa83b99bcb56ae3b41b45eabe8dede008cb" exitCode=0 Oct 06 22:59:49 crc kubenswrapper[5014]: I1006 22:59:49.112405 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6bfb-account-create-wptvp" event={"ID":"592b407e-657b-4204-af83-d4fe4508ee7e","Type":"ContainerDied","Data":"1addd1103523de258e0c26388b5b4aa83b99bcb56ae3b41b45eabe8dede008cb"} Oct 06 22:59:49 crc kubenswrapper[5014]: I1006 22:59:49.112444 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6bfb-account-create-wptvp" event={"ID":"592b407e-657b-4204-af83-d4fe4508ee7e","Type":"ContainerStarted","Data":"4fe2132f2b00516c60d95f15fcea391a2b3ed66957e6cc6b512e8508971504d8"} Oct 06 22:59:50 crc kubenswrapper[5014]: I1006 22:59:50.550041 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6bfb-account-create-wptvp" Oct 06 22:59:50 crc kubenswrapper[5014]: I1006 22:59:50.661928 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bl6sk\" (UniqueName: \"kubernetes.io/projected/592b407e-657b-4204-af83-d4fe4508ee7e-kube-api-access-bl6sk\") pod \"592b407e-657b-4204-af83-d4fe4508ee7e\" (UID: \"592b407e-657b-4204-af83-d4fe4508ee7e\") " Oct 06 22:59:50 crc kubenswrapper[5014]: I1006 22:59:50.672111 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/592b407e-657b-4204-af83-d4fe4508ee7e-kube-api-access-bl6sk" (OuterVolumeSpecName: "kube-api-access-bl6sk") pod "592b407e-657b-4204-af83-d4fe4508ee7e" (UID: "592b407e-657b-4204-af83-d4fe4508ee7e"). InnerVolumeSpecName "kube-api-access-bl6sk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:59:50 crc kubenswrapper[5014]: I1006 22:59:50.764301 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bl6sk\" (UniqueName: \"kubernetes.io/projected/592b407e-657b-4204-af83-d4fe4508ee7e-kube-api-access-bl6sk\") on node \"crc\" DevicePath \"\"" Oct 06 22:59:51 crc kubenswrapper[5014]: I1006 22:59:51.134109 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6bfb-account-create-wptvp" event={"ID":"592b407e-657b-4204-af83-d4fe4508ee7e","Type":"ContainerDied","Data":"4fe2132f2b00516c60d95f15fcea391a2b3ed66957e6cc6b512e8508971504d8"} Oct 06 22:59:51 crc kubenswrapper[5014]: I1006 22:59:51.134163 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4fe2132f2b00516c60d95f15fcea391a2b3ed66957e6cc6b512e8508971504d8" Oct 06 22:59:51 crc kubenswrapper[5014]: I1006 22:59:51.134197 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6bfb-account-create-wptvp" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.433227 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-j9dm7"] Oct 06 22:59:53 crc kubenswrapper[5014]: E1006 22:59:53.434841 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="592b407e-657b-4204-af83-d4fe4508ee7e" containerName="mariadb-account-create" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.434940 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="592b407e-657b-4204-af83-d4fe4508ee7e" containerName="mariadb-account-create" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.435217 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="592b407e-657b-4204-af83-d4fe4508ee7e" containerName="mariadb-account-create" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.435976 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-j9dm7" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.442689 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.442805 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.443239 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-q97n8" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.443247 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.460913 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-j9dm7"] Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.511815 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3c29595-ff94-4ee2-a182-29445fe5d2ad-config-data\") pod \"keystone-db-sync-j9dm7\" (UID: \"c3c29595-ff94-4ee2-a182-29445fe5d2ad\") " pod="openstack/keystone-db-sync-j9dm7" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.511933 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qx85n\" (UniqueName: \"kubernetes.io/projected/c3c29595-ff94-4ee2-a182-29445fe5d2ad-kube-api-access-qx85n\") pod \"keystone-db-sync-j9dm7\" (UID: \"c3c29595-ff94-4ee2-a182-29445fe5d2ad\") " pod="openstack/keystone-db-sync-j9dm7" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.512382 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3c29595-ff94-4ee2-a182-29445fe5d2ad-combined-ca-bundle\") pod \"keystone-db-sync-j9dm7\" (UID: \"c3c29595-ff94-4ee2-a182-29445fe5d2ad\") " pod="openstack/keystone-db-sync-j9dm7" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.614339 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3c29595-ff94-4ee2-a182-29445fe5d2ad-combined-ca-bundle\") pod \"keystone-db-sync-j9dm7\" (UID: \"c3c29595-ff94-4ee2-a182-29445fe5d2ad\") " pod="openstack/keystone-db-sync-j9dm7" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.614391 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3c29595-ff94-4ee2-a182-29445fe5d2ad-config-data\") pod \"keystone-db-sync-j9dm7\" (UID: \"c3c29595-ff94-4ee2-a182-29445fe5d2ad\") " pod="openstack/keystone-db-sync-j9dm7" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.614524 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qx85n\" (UniqueName: \"kubernetes.io/projected/c3c29595-ff94-4ee2-a182-29445fe5d2ad-kube-api-access-qx85n\") pod \"keystone-db-sync-j9dm7\" (UID: \"c3c29595-ff94-4ee2-a182-29445fe5d2ad\") " pod="openstack/keystone-db-sync-j9dm7" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.628138 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3c29595-ff94-4ee2-a182-29445fe5d2ad-config-data\") pod \"keystone-db-sync-j9dm7\" (UID: \"c3c29595-ff94-4ee2-a182-29445fe5d2ad\") " 
pod="openstack/keystone-db-sync-j9dm7" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.629379 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3c29595-ff94-4ee2-a182-29445fe5d2ad-combined-ca-bundle\") pod \"keystone-db-sync-j9dm7\" (UID: \"c3c29595-ff94-4ee2-a182-29445fe5d2ad\") " pod="openstack/keystone-db-sync-j9dm7" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.630350 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qx85n\" (UniqueName: \"kubernetes.io/projected/c3c29595-ff94-4ee2-a182-29445fe5d2ad-kube-api-access-qx85n\") pod \"keystone-db-sync-j9dm7\" (UID: \"c3c29595-ff94-4ee2-a182-29445fe5d2ad\") " pod="openstack/keystone-db-sync-j9dm7" Oct 06 22:59:53 crc kubenswrapper[5014]: I1006 22:59:53.767256 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-j9dm7" Oct 06 22:59:54 crc kubenswrapper[5014]: I1006 22:59:54.250831 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-j9dm7"] Oct 06 22:59:54 crc kubenswrapper[5014]: W1006 22:59:54.253603 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc3c29595_ff94_4ee2_a182_29445fe5d2ad.slice/crio-b4dc82644e3193400fb3c54c409c7a1b0d953e06329a1d818c28f59d4791e548 WatchSource:0}: Error finding container b4dc82644e3193400fb3c54c409c7a1b0d953e06329a1d818c28f59d4791e548: Status 404 returned error can't find the container with id b4dc82644e3193400fb3c54c409c7a1b0d953e06329a1d818c28f59d4791e548 Oct 06 22:59:55 crc kubenswrapper[5014]: I1006 22:59:55.176667 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-j9dm7" event={"ID":"c3c29595-ff94-4ee2-a182-29445fe5d2ad","Type":"ContainerStarted","Data":"4298df35e3c863a6fd0801b4d5d65657f1f965f3684606edda11c19e512401b8"} Oct 06 22:59:55 crc kubenswrapper[5014]: I1006 22:59:55.176945 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-j9dm7" event={"ID":"c3c29595-ff94-4ee2-a182-29445fe5d2ad","Type":"ContainerStarted","Data":"b4dc82644e3193400fb3c54c409c7a1b0d953e06329a1d818c28f59d4791e548"} Oct 06 22:59:55 crc kubenswrapper[5014]: I1006 22:59:55.210837 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-j9dm7" podStartSLOduration=2.210810013 podStartE2EDuration="2.210810013s" podCreationTimestamp="2025-10-06 22:59:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 22:59:55.196163276 +0000 UTC m=+5340.489200010" watchObservedRunningTime="2025-10-06 22:59:55.210810013 +0000 UTC m=+5340.503846777" Oct 06 22:59:56 crc kubenswrapper[5014]: I1006 22:59:56.189790 5014 generic.go:334] "Generic (PLEG): container finished" podID="c3c29595-ff94-4ee2-a182-29445fe5d2ad" containerID="4298df35e3c863a6fd0801b4d5d65657f1f965f3684606edda11c19e512401b8" exitCode=0 Oct 06 22:59:56 crc kubenswrapper[5014]: I1006 22:59:56.189939 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-j9dm7" event={"ID":"c3c29595-ff94-4ee2-a182-29445fe5d2ad","Type":"ContainerDied","Data":"4298df35e3c863a6fd0801b4d5d65657f1f965f3684606edda11c19e512401b8"} Oct 06 22:59:57 crc kubenswrapper[5014]: I1006 22:59:57.631317 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-j9dm7" Oct 06 22:59:57 crc kubenswrapper[5014]: I1006 22:59:57.687639 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3c29595-ff94-4ee2-a182-29445fe5d2ad-combined-ca-bundle\") pod \"c3c29595-ff94-4ee2-a182-29445fe5d2ad\" (UID: \"c3c29595-ff94-4ee2-a182-29445fe5d2ad\") " Oct 06 22:59:57 crc kubenswrapper[5014]: I1006 22:59:57.687825 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3c29595-ff94-4ee2-a182-29445fe5d2ad-config-data\") pod \"c3c29595-ff94-4ee2-a182-29445fe5d2ad\" (UID: \"c3c29595-ff94-4ee2-a182-29445fe5d2ad\") " Oct 06 22:59:57 crc kubenswrapper[5014]: I1006 22:59:57.687861 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qx85n\" (UniqueName: \"kubernetes.io/projected/c3c29595-ff94-4ee2-a182-29445fe5d2ad-kube-api-access-qx85n\") pod \"c3c29595-ff94-4ee2-a182-29445fe5d2ad\" (UID: \"c3c29595-ff94-4ee2-a182-29445fe5d2ad\") " Oct 06 22:59:57 crc kubenswrapper[5014]: I1006 22:59:57.698973 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3c29595-ff94-4ee2-a182-29445fe5d2ad-kube-api-access-qx85n" (OuterVolumeSpecName: "kube-api-access-qx85n") pod "c3c29595-ff94-4ee2-a182-29445fe5d2ad" (UID: "c3c29595-ff94-4ee2-a182-29445fe5d2ad"). InnerVolumeSpecName "kube-api-access-qx85n". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 22:59:57 crc kubenswrapper[5014]: I1006 22:59:57.726393 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3c29595-ff94-4ee2-a182-29445fe5d2ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c3c29595-ff94-4ee2-a182-29445fe5d2ad" (UID: "c3c29595-ff94-4ee2-a182-29445fe5d2ad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 22:59:57 crc kubenswrapper[5014]: I1006 22:59:57.731335 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3c29595-ff94-4ee2-a182-29445fe5d2ad-config-data" (OuterVolumeSpecName: "config-data") pod "c3c29595-ff94-4ee2-a182-29445fe5d2ad" (UID: "c3c29595-ff94-4ee2-a182-29445fe5d2ad"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 22:59:57 crc kubenswrapper[5014]: I1006 22:59:57.789264 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qx85n\" (UniqueName: \"kubernetes.io/projected/c3c29595-ff94-4ee2-a182-29445fe5d2ad-kube-api-access-qx85n\") on node \"crc\" DevicePath \"\"" Oct 06 22:59:57 crc kubenswrapper[5014]: I1006 22:59:57.789454 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3c29595-ff94-4ee2-a182-29445fe5d2ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 22:59:57 crc kubenswrapper[5014]: I1006 22:59:57.789529 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3c29595-ff94-4ee2-a182-29445fe5d2ad-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.215591 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-j9dm7" event={"ID":"c3c29595-ff94-4ee2-a182-29445fe5d2ad","Type":"ContainerDied","Data":"b4dc82644e3193400fb3c54c409c7a1b0d953e06329a1d818c28f59d4791e548"} Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.215670 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4dc82644e3193400fb3c54c409c7a1b0d953e06329a1d818c28f59d4791e548" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.215762 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-j9dm7" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.472879 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6c7c55f8b9-fkwpf"] Oct 06 22:59:58 crc kubenswrapper[5014]: E1006 22:59:58.473509 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3c29595-ff94-4ee2-a182-29445fe5d2ad" containerName="keystone-db-sync" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.473524 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3c29595-ff94-4ee2-a182-29445fe5d2ad" containerName="keystone-db-sync" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.474807 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3c29595-ff94-4ee2-a182-29445fe5d2ad" containerName="keystone-db-sync" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.475647 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.486455 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 22:59:58 crc kubenswrapper[5014]: E1006 22:59:58.486651 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.494848 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c7c55f8b9-fkwpf"] Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.500711 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-ovsdbserver-sb\") pod \"dnsmasq-dns-6c7c55f8b9-fkwpf\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.500797 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkcgk\" (UniqueName: \"kubernetes.io/projected/ff5b5727-cb7f-44fb-823f-a7486367ba78-kube-api-access-xkcgk\") pod \"dnsmasq-dns-6c7c55f8b9-fkwpf\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.500876 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-dns-svc\") pod \"dnsmasq-dns-6c7c55f8b9-fkwpf\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.500944 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-ovsdbserver-nb\") pod \"dnsmasq-dns-6c7c55f8b9-fkwpf\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.501005 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-config\") pod \"dnsmasq-dns-6c7c55f8b9-fkwpf\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.534684 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-zscd2"] Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.535917 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.540281 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.540418 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-q97n8" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.540448 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.540551 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.548507 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-zscd2"] Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.602440 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-dns-svc\") pod \"dnsmasq-dns-6c7c55f8b9-fkwpf\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.602497 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-credential-keys\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.602529 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-combined-ca-bundle\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.602563 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-scripts\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.602579 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-ovsdbserver-nb\") pod \"dnsmasq-dns-6c7c55f8b9-fkwpf\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.602608 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-config\") pod \"dnsmasq-dns-6c7c55f8b9-fkwpf\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.602676 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-ovsdbserver-sb\") pod \"dnsmasq-dns-6c7c55f8b9-fkwpf\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " 
pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.602703 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-fernet-keys\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.602732 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5wfk\" (UniqueName: \"kubernetes.io/projected/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-kube-api-access-d5wfk\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.602751 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkcgk\" (UniqueName: \"kubernetes.io/projected/ff5b5727-cb7f-44fb-823f-a7486367ba78-kube-api-access-xkcgk\") pod \"dnsmasq-dns-6c7c55f8b9-fkwpf\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.602770 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-config-data\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.603498 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-dns-svc\") pod \"dnsmasq-dns-6c7c55f8b9-fkwpf\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.604122 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-ovsdbserver-nb\") pod \"dnsmasq-dns-6c7c55f8b9-fkwpf\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.604819 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-config\") pod \"dnsmasq-dns-6c7c55f8b9-fkwpf\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.605312 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-ovsdbserver-sb\") pod \"dnsmasq-dns-6c7c55f8b9-fkwpf\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.626951 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkcgk\" (UniqueName: \"kubernetes.io/projected/ff5b5727-cb7f-44fb-823f-a7486367ba78-kube-api-access-xkcgk\") pod \"dnsmasq-dns-6c7c55f8b9-fkwpf\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc 
kubenswrapper[5014]: I1006 22:59:58.704597 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5wfk\" (UniqueName: \"kubernetes.io/projected/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-kube-api-access-d5wfk\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.704679 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-config-data\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.704745 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-credential-keys\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.704779 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-combined-ca-bundle\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.704824 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-scripts\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.704898 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-fernet-keys\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.708760 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-combined-ca-bundle\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.708913 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-scripts\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.709477 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-credential-keys\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.718514 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-config-data\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.719440 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-fernet-keys\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.721243 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5wfk\" (UniqueName: \"kubernetes.io/projected/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-kube-api-access-d5wfk\") pod \"keystone-bootstrap-zscd2\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.799881 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 22:59:58 crc kubenswrapper[5014]: I1006 22:59:58.856383 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-zscd2" Oct 06 22:59:59 crc kubenswrapper[5014]: W1006 22:59:59.296018 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff5b5727_cb7f_44fb_823f_a7486367ba78.slice/crio-2f46f825e1c527d17c116dd0f4c7af7a8be91f4e3139c38c1e6ccb1fac552573 WatchSource:0}: Error finding container 2f46f825e1c527d17c116dd0f4c7af7a8be91f4e3139c38c1e6ccb1fac552573: Status 404 returned error can't find the container with id 2f46f825e1c527d17c116dd0f4c7af7a8be91f4e3139c38c1e6ccb1fac552573 Oct 06 22:59:59 crc kubenswrapper[5014]: I1006 22:59:59.297058 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c7c55f8b9-fkwpf"] Oct 06 22:59:59 crc kubenswrapper[5014]: I1006 22:59:59.348148 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-zscd2"] Oct 06 22:59:59 crc kubenswrapper[5014]: W1006 22:59:59.358727 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4dfbd525_38ef_4d9b_aaa4_7ac5facf0bcf.slice/crio-4c647bb1604db86dcd831f466547987c8e9119d238a79ff218eb939db344e672 WatchSource:0}: Error finding container 4c647bb1604db86dcd831f466547987c8e9119d238a79ff218eb939db344e672: Status 404 returned error can't find the container with id 4c647bb1604db86dcd831f466547987c8e9119d238a79ff218eb939db344e672 Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.142823 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928"] Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.145820 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.148387 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.149301 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.164411 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928"] Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.247528 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e4ee9a6a-9099-48e4-83d6-1c988edf786a-secret-volume\") pod \"collect-profiles-29329860-fq928\" (UID: \"e4ee9a6a-9099-48e4-83d6-1c988edf786a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.247603 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e4ee9a6a-9099-48e4-83d6-1c988edf786a-config-volume\") pod \"collect-profiles-29329860-fq928\" (UID: \"e4ee9a6a-9099-48e4-83d6-1c988edf786a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.247704 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpvr7\" (UniqueName: \"kubernetes.io/projected/e4ee9a6a-9099-48e4-83d6-1c988edf786a-kube-api-access-hpvr7\") pod \"collect-profiles-29329860-fq928\" (UID: \"e4ee9a6a-9099-48e4-83d6-1c988edf786a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.281465 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zscd2" event={"ID":"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf","Type":"ContainerStarted","Data":"381353fb34afa6363549c3202bfb671473be4e37b2d2260cdf6c01ffbf096552"} Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.281516 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zscd2" event={"ID":"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf","Type":"ContainerStarted","Data":"4c647bb1604db86dcd831f466547987c8e9119d238a79ff218eb939db344e672"} Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.285367 5014 generic.go:334] "Generic (PLEG): container finished" podID="ff5b5727-cb7f-44fb-823f-a7486367ba78" containerID="1ef5aa10d5b18f70f3b592b6065344aa274ccc67ea7427ccf43878cba412dd4d" exitCode=0 Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.285426 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" event={"ID":"ff5b5727-cb7f-44fb-823f-a7486367ba78","Type":"ContainerDied","Data":"1ef5aa10d5b18f70f3b592b6065344aa274ccc67ea7427ccf43878cba412dd4d"} Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.285458 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" event={"ID":"ff5b5727-cb7f-44fb-823f-a7486367ba78","Type":"ContainerStarted","Data":"2f46f825e1c527d17c116dd0f4c7af7a8be91f4e3139c38c1e6ccb1fac552573"} Oct 06 23:00:00 crc 
kubenswrapper[5014]: I1006 23:00:00.306367 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-zscd2" podStartSLOduration=2.306345921 podStartE2EDuration="2.306345921s" podCreationTimestamp="2025-10-06 22:59:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:00:00.296916566 +0000 UTC m=+5345.589953310" watchObservedRunningTime="2025-10-06 23:00:00.306345921 +0000 UTC m=+5345.599382665" Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.351634 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpvr7\" (UniqueName: \"kubernetes.io/projected/e4ee9a6a-9099-48e4-83d6-1c988edf786a-kube-api-access-hpvr7\") pod \"collect-profiles-29329860-fq928\" (UID: \"e4ee9a6a-9099-48e4-83d6-1c988edf786a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.351902 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e4ee9a6a-9099-48e4-83d6-1c988edf786a-secret-volume\") pod \"collect-profiles-29329860-fq928\" (UID: \"e4ee9a6a-9099-48e4-83d6-1c988edf786a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.352098 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e4ee9a6a-9099-48e4-83d6-1c988edf786a-config-volume\") pod \"collect-profiles-29329860-fq928\" (UID: \"e4ee9a6a-9099-48e4-83d6-1c988edf786a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.353169 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e4ee9a6a-9099-48e4-83d6-1c988edf786a-config-volume\") pod \"collect-profiles-29329860-fq928\" (UID: \"e4ee9a6a-9099-48e4-83d6-1c988edf786a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.361778 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e4ee9a6a-9099-48e4-83d6-1c988edf786a-secret-volume\") pod \"collect-profiles-29329860-fq928\" (UID: \"e4ee9a6a-9099-48e4-83d6-1c988edf786a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.379037 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpvr7\" (UniqueName: \"kubernetes.io/projected/e4ee9a6a-9099-48e4-83d6-1c988edf786a-kube-api-access-hpvr7\") pod \"collect-profiles-29329860-fq928\" (UID: \"e4ee9a6a-9099-48e4-83d6-1c988edf786a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.507592 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" Oct 06 23:00:00 crc kubenswrapper[5014]: I1006 23:00:00.966705 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928"] Oct 06 23:00:00 crc kubenswrapper[5014]: W1006 23:00:00.969908 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode4ee9a6a_9099_48e4_83d6_1c988edf786a.slice/crio-6d2f0b80eea587183b018fe86cc87f3fab499ffe2f03199cf8092d13861d1a67 WatchSource:0}: Error finding container 6d2f0b80eea587183b018fe86cc87f3fab499ffe2f03199cf8092d13861d1a67: Status 404 returned error can't find the container with id 6d2f0b80eea587183b018fe86cc87f3fab499ffe2f03199cf8092d13861d1a67 Oct 06 23:00:01 crc kubenswrapper[5014]: I1006 23:00:01.319760 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" event={"ID":"e4ee9a6a-9099-48e4-83d6-1c988edf786a","Type":"ContainerStarted","Data":"97de527d99641570ddaf36174ccfaacc4c72ebd2c63d5c68b3bb22954d9ede32"} Oct 06 23:00:01 crc kubenswrapper[5014]: I1006 23:00:01.319985 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" event={"ID":"e4ee9a6a-9099-48e4-83d6-1c988edf786a","Type":"ContainerStarted","Data":"6d2f0b80eea587183b018fe86cc87f3fab499ffe2f03199cf8092d13861d1a67"} Oct 06 23:00:01 crc kubenswrapper[5014]: I1006 23:00:01.325233 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" event={"ID":"ff5b5727-cb7f-44fb-823f-a7486367ba78","Type":"ContainerStarted","Data":"554d4a657625ffa80048095ff4aa0026008d843965264a98ea23801fd8f374f8"} Oct 06 23:00:01 crc kubenswrapper[5014]: I1006 23:00:01.325616 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 23:00:01 crc kubenswrapper[5014]: I1006 23:00:01.341074 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" podStartSLOduration=1.341046316 podStartE2EDuration="1.341046316s" podCreationTimestamp="2025-10-06 23:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:00:01.335440372 +0000 UTC m=+5346.628477106" watchObservedRunningTime="2025-10-06 23:00:01.341046316 +0000 UTC m=+5346.634083080" Oct 06 23:00:01 crc kubenswrapper[5014]: I1006 23:00:01.357594 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" podStartSLOduration=3.357573132 podStartE2EDuration="3.357573132s" podCreationTimestamp="2025-10-06 22:59:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:00:01.354847997 +0000 UTC m=+5346.647884771" watchObservedRunningTime="2025-10-06 23:00:01.357573132 +0000 UTC m=+5346.650609886" Oct 06 23:00:02 crc kubenswrapper[5014]: I1006 23:00:02.335876 5014 generic.go:334] "Generic (PLEG): container finished" podID="e4ee9a6a-9099-48e4-83d6-1c988edf786a" containerID="97de527d99641570ddaf36174ccfaacc4c72ebd2c63d5c68b3bb22954d9ede32" exitCode=0 Oct 06 23:00:02 crc kubenswrapper[5014]: I1006 23:00:02.335989 5014 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" event={"ID":"e4ee9a6a-9099-48e4-83d6-1c988edf786a","Type":"ContainerDied","Data":"97de527d99641570ddaf36174ccfaacc4c72ebd2c63d5c68b3bb22954d9ede32"} Oct 06 23:00:03 crc kubenswrapper[5014]: I1006 23:00:03.351275 5014 generic.go:334] "Generic (PLEG): container finished" podID="4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf" containerID="381353fb34afa6363549c3202bfb671473be4e37b2d2260cdf6c01ffbf096552" exitCode=0 Oct 06 23:00:03 crc kubenswrapper[5014]: I1006 23:00:03.351366 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zscd2" event={"ID":"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf","Type":"ContainerDied","Data":"381353fb34afa6363549c3202bfb671473be4e37b2d2260cdf6c01ffbf096552"} Oct 06 23:00:03 crc kubenswrapper[5014]: I1006 23:00:03.705337 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" Oct 06 23:00:03 crc kubenswrapper[5014]: I1006 23:00:03.722946 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e4ee9a6a-9099-48e4-83d6-1c988edf786a-config-volume\") pod \"e4ee9a6a-9099-48e4-83d6-1c988edf786a\" (UID: \"e4ee9a6a-9099-48e4-83d6-1c988edf786a\") " Oct 06 23:00:03 crc kubenswrapper[5014]: I1006 23:00:03.723707 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4ee9a6a-9099-48e4-83d6-1c988edf786a-config-volume" (OuterVolumeSpecName: "config-volume") pod "e4ee9a6a-9099-48e4-83d6-1c988edf786a" (UID: "e4ee9a6a-9099-48e4-83d6-1c988edf786a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 23:00:03 crc kubenswrapper[5014]: I1006 23:00:03.724137 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hpvr7\" (UniqueName: \"kubernetes.io/projected/e4ee9a6a-9099-48e4-83d6-1c988edf786a-kube-api-access-hpvr7\") pod \"e4ee9a6a-9099-48e4-83d6-1c988edf786a\" (UID: \"e4ee9a6a-9099-48e4-83d6-1c988edf786a\") " Oct 06 23:00:03 crc kubenswrapper[5014]: I1006 23:00:03.724304 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e4ee9a6a-9099-48e4-83d6-1c988edf786a-secret-volume\") pod \"e4ee9a6a-9099-48e4-83d6-1c988edf786a\" (UID: \"e4ee9a6a-9099-48e4-83d6-1c988edf786a\") " Oct 06 23:00:03 crc kubenswrapper[5014]: I1006 23:00:03.727724 5014 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e4ee9a6a-9099-48e4-83d6-1c988edf786a-config-volume\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:03 crc kubenswrapper[5014]: I1006 23:00:03.730910 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4ee9a6a-9099-48e4-83d6-1c988edf786a-kube-api-access-hpvr7" (OuterVolumeSpecName: "kube-api-access-hpvr7") pod "e4ee9a6a-9099-48e4-83d6-1c988edf786a" (UID: "e4ee9a6a-9099-48e4-83d6-1c988edf786a"). InnerVolumeSpecName "kube-api-access-hpvr7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:00:03 crc kubenswrapper[5014]: I1006 23:00:03.731049 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4ee9a6a-9099-48e4-83d6-1c988edf786a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e4ee9a6a-9099-48e4-83d6-1c988edf786a" (UID: "e4ee9a6a-9099-48e4-83d6-1c988edf786a"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:00:03 crc kubenswrapper[5014]: I1006 23:00:03.829068 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hpvr7\" (UniqueName: \"kubernetes.io/projected/e4ee9a6a-9099-48e4-83d6-1c988edf786a-kube-api-access-hpvr7\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:03 crc kubenswrapper[5014]: I1006 23:00:03.829119 5014 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e4ee9a6a-9099-48e4-83d6-1c988edf786a-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.363447 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.363444 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29329860-fq928" event={"ID":"e4ee9a6a-9099-48e4-83d6-1c988edf786a","Type":"ContainerDied","Data":"6d2f0b80eea587183b018fe86cc87f3fab499ffe2f03199cf8092d13861d1a67"} Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.363494 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d2f0b80eea587183b018fe86cc87f3fab499ffe2f03199cf8092d13861d1a67" Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.436139 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"] Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.444178 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29329815-s8g4s"] Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.720988 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-zscd2" Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.743189 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-config-data\") pod \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.743243 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-scripts\") pod \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.743269 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-credential-keys\") pod \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.743309 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d5wfk\" (UniqueName: \"kubernetes.io/projected/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-kube-api-access-d5wfk\") pod \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.743435 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-fernet-keys\") pod \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.743528 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-combined-ca-bundle\") pod \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\" (UID: \"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf\") " Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.751735 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf" (UID: "4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.751779 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-scripts" (OuterVolumeSpecName: "scripts") pod "4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf" (UID: "4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.758913 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf" (UID: "4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.769998 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-kube-api-access-d5wfk" (OuterVolumeSpecName: "kube-api-access-d5wfk") pod "4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf" (UID: "4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf"). InnerVolumeSpecName "kube-api-access-d5wfk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.773965 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-config-data" (OuterVolumeSpecName: "config-data") pod "4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf" (UID: "4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.774962 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf" (UID: "4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.845021 5014 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.845266 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.845332 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.845384 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.845434 5014 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-credential-keys\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:04 crc kubenswrapper[5014]: I1006 23:00:04.845483 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d5wfk\" (UniqueName: \"kubernetes.io/projected/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf-kube-api-access-d5wfk\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.373723 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zscd2" event={"ID":"4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf","Type":"ContainerDied","Data":"4c647bb1604db86dcd831f466547987c8e9119d238a79ff218eb939db344e672"} Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.373768 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4c647bb1604db86dcd831f466547987c8e9119d238a79ff218eb939db344e672" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 
23:00:05.373750 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-zscd2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.441396 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-zscd2"] Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.447631 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-zscd2"] Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.498178 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="387f2808-42e5-4664-80cf-9c267b71a798" path="/var/lib/kubelet/pods/387f2808-42e5-4664-80cf-9c267b71a798/volumes" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.498745 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf" path="/var/lib/kubelet/pods/4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf/volumes" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.554264 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-cjxq2"] Oct 06 23:00:05 crc kubenswrapper[5014]: E1006 23:00:05.555131 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf" containerName="keystone-bootstrap" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.555164 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf" containerName="keystone-bootstrap" Oct 06 23:00:05 crc kubenswrapper[5014]: E1006 23:00:05.555213 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4ee9a6a-9099-48e4-83d6-1c988edf786a" containerName="collect-profiles" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.555226 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4ee9a6a-9099-48e4-83d6-1c988edf786a" containerName="collect-profiles" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.555513 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4ee9a6a-9099-48e4-83d6-1c988edf786a" containerName="collect-profiles" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.555550 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dfbd525-38ef-4d9b-aaa4-7ac5facf0bcf" containerName="keystone-bootstrap" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.556471 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.561327 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.561416 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.561683 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.561829 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-q97n8" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.575307 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-cjxq2"] Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.660168 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-credential-keys\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.660277 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-config-data\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.660303 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-scripts\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.660329 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-combined-ca-bundle\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.660413 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-fernet-keys\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.660500 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95bwb\" (UniqueName: \"kubernetes.io/projected/1cf816d0-0a38-4417-91a3-be05870aa60d-kube-api-access-95bwb\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.762536 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95bwb\" (UniqueName: \"kubernetes.io/projected/1cf816d0-0a38-4417-91a3-be05870aa60d-kube-api-access-95bwb\") pod 
\"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.762586 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-credential-keys\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.762659 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-config-data\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.762676 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-scripts\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.762697 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-combined-ca-bundle\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.762721 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-fernet-keys\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.768206 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-scripts\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.768767 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-fernet-keys\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.769015 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-combined-ca-bundle\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.769289 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-credential-keys\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.770919 5014 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-config-data\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.791049 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95bwb\" (UniqueName: \"kubernetes.io/projected/1cf816d0-0a38-4417-91a3-be05870aa60d-kube-api-access-95bwb\") pod \"keystone-bootstrap-cjxq2\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:05 crc kubenswrapper[5014]: I1006 23:00:05.910812 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:06 crc kubenswrapper[5014]: W1006 23:00:06.403742 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1cf816d0_0a38_4417_91a3_be05870aa60d.slice/crio-45a9a25a2c24de7be6f6385ef121b0fe626e66908432d8cafa98d716c0568e03 WatchSource:0}: Error finding container 45a9a25a2c24de7be6f6385ef121b0fe626e66908432d8cafa98d716c0568e03: Status 404 returned error can't find the container with id 45a9a25a2c24de7be6f6385ef121b0fe626e66908432d8cafa98d716c0568e03 Oct 06 23:00:06 crc kubenswrapper[5014]: I1006 23:00:06.412521 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-cjxq2"] Oct 06 23:00:07 crc kubenswrapper[5014]: I1006 23:00:07.401002 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-cjxq2" event={"ID":"1cf816d0-0a38-4417-91a3-be05870aa60d","Type":"ContainerStarted","Data":"d2f1514b1e8722df8292b25012388ed4e1f253e2299f2e46fe48fc04d83f4345"} Oct 06 23:00:07 crc kubenswrapper[5014]: I1006 23:00:07.401443 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-cjxq2" event={"ID":"1cf816d0-0a38-4417-91a3-be05870aa60d","Type":"ContainerStarted","Data":"45a9a25a2c24de7be6f6385ef121b0fe626e66908432d8cafa98d716c0568e03"} Oct 06 23:00:07 crc kubenswrapper[5014]: I1006 23:00:07.441214 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-cjxq2" podStartSLOduration=2.441187012 podStartE2EDuration="2.441187012s" podCreationTimestamp="2025-10-06 23:00:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:00:07.430054765 +0000 UTC m=+5352.723091529" watchObservedRunningTime="2025-10-06 23:00:07.441187012 +0000 UTC m=+5352.734223776" Oct 06 23:00:08 crc kubenswrapper[5014]: I1006 23:00:08.801931 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 23:00:08 crc kubenswrapper[5014]: I1006 23:00:08.909545 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-69864b896c-6crm5"] Oct 06 23:00:08 crc kubenswrapper[5014]: I1006 23:00:08.909835 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-69864b896c-6crm5" podUID="96fd7974-927c-499a-9221-b04af21765da" containerName="dnsmasq-dns" containerID="cri-o://357466a4a53aa038aef3c14d1106ae1f16c32b204c7e61bfdc715636d04266c0" gracePeriod=10 Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.418847 5014 util.go:48] "No ready sandbox 
for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.420410 5014 generic.go:334] "Generic (PLEG): container finished" podID="96fd7974-927c-499a-9221-b04af21765da" containerID="357466a4a53aa038aef3c14d1106ae1f16c32b204c7e61bfdc715636d04266c0" exitCode=0 Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.420515 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69864b896c-6crm5" event={"ID":"96fd7974-927c-499a-9221-b04af21765da","Type":"ContainerDied","Data":"357466a4a53aa038aef3c14d1106ae1f16c32b204c7e61bfdc715636d04266c0"} Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.420550 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69864b896c-6crm5" event={"ID":"96fd7974-927c-499a-9221-b04af21765da","Type":"ContainerDied","Data":"a1b2d97e8d09dcc8ae5f4570bcc529f747b07207291e569dbb907d40a889fcc5"} Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.420573 5014 scope.go:117] "RemoveContainer" containerID="357466a4a53aa038aef3c14d1106ae1f16c32b204c7e61bfdc715636d04266c0" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.421967 5014 generic.go:334] "Generic (PLEG): container finished" podID="1cf816d0-0a38-4417-91a3-be05870aa60d" containerID="d2f1514b1e8722df8292b25012388ed4e1f253e2299f2e46fe48fc04d83f4345" exitCode=0 Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.421996 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-cjxq2" event={"ID":"1cf816d0-0a38-4417-91a3-be05870aa60d","Type":"ContainerDied","Data":"d2f1514b1e8722df8292b25012388ed4e1f253e2299f2e46fe48fc04d83f4345"} Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.451077 5014 scope.go:117] "RemoveContainer" containerID="3a60fe28ecc7347e9d6ffd72c014a662a524d00cad34dcf4fcde41061f68d4a2" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.475187 5014 scope.go:117] "RemoveContainer" containerID="357466a4a53aa038aef3c14d1106ae1f16c32b204c7e61bfdc715636d04266c0" Oct 06 23:00:09 crc kubenswrapper[5014]: E1006 23:00:09.475675 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"357466a4a53aa038aef3c14d1106ae1f16c32b204c7e61bfdc715636d04266c0\": container with ID starting with 357466a4a53aa038aef3c14d1106ae1f16c32b204c7e61bfdc715636d04266c0 not found: ID does not exist" containerID="357466a4a53aa038aef3c14d1106ae1f16c32b204c7e61bfdc715636d04266c0" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.475717 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"357466a4a53aa038aef3c14d1106ae1f16c32b204c7e61bfdc715636d04266c0"} err="failed to get container status \"357466a4a53aa038aef3c14d1106ae1f16c32b204c7e61bfdc715636d04266c0\": rpc error: code = NotFound desc = could not find container \"357466a4a53aa038aef3c14d1106ae1f16c32b204c7e61bfdc715636d04266c0\": container with ID starting with 357466a4a53aa038aef3c14d1106ae1f16c32b204c7e61bfdc715636d04266c0 not found: ID does not exist" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.475745 5014 scope.go:117] "RemoveContainer" containerID="3a60fe28ecc7347e9d6ffd72c014a662a524d00cad34dcf4fcde41061f68d4a2" Oct 06 23:00:09 crc kubenswrapper[5014]: E1006 23:00:09.476042 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"3a60fe28ecc7347e9d6ffd72c014a662a524d00cad34dcf4fcde41061f68d4a2\": container with ID starting with 3a60fe28ecc7347e9d6ffd72c014a662a524d00cad34dcf4fcde41061f68d4a2 not found: ID does not exist" containerID="3a60fe28ecc7347e9d6ffd72c014a662a524d00cad34dcf4fcde41061f68d4a2" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.476066 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a60fe28ecc7347e9d6ffd72c014a662a524d00cad34dcf4fcde41061f68d4a2"} err="failed to get container status \"3a60fe28ecc7347e9d6ffd72c014a662a524d00cad34dcf4fcde41061f68d4a2\": rpc error: code = NotFound desc = could not find container \"3a60fe28ecc7347e9d6ffd72c014a662a524d00cad34dcf4fcde41061f68d4a2\": container with ID starting with 3a60fe28ecc7347e9d6ffd72c014a662a524d00cad34dcf4fcde41061f68d4a2 not found: ID does not exist" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.569364 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-config\") pod \"96fd7974-927c-499a-9221-b04af21765da\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.569606 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-ovsdbserver-sb\") pod \"96fd7974-927c-499a-9221-b04af21765da\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.569680 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-dns-svc\") pod \"96fd7974-927c-499a-9221-b04af21765da\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.569709 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qpc6c\" (UniqueName: \"kubernetes.io/projected/96fd7974-927c-499a-9221-b04af21765da-kube-api-access-qpc6c\") pod \"96fd7974-927c-499a-9221-b04af21765da\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.569738 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-ovsdbserver-nb\") pod \"96fd7974-927c-499a-9221-b04af21765da\" (UID: \"96fd7974-927c-499a-9221-b04af21765da\") " Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.574687 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96fd7974-927c-499a-9221-b04af21765da-kube-api-access-qpc6c" (OuterVolumeSpecName: "kube-api-access-qpc6c") pod "96fd7974-927c-499a-9221-b04af21765da" (UID: "96fd7974-927c-499a-9221-b04af21765da"). InnerVolumeSpecName "kube-api-access-qpc6c". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.603461 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "96fd7974-927c-499a-9221-b04af21765da" (UID: "96fd7974-927c-499a-9221-b04af21765da"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.612580 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "96fd7974-927c-499a-9221-b04af21765da" (UID: "96fd7974-927c-499a-9221-b04af21765da"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.619145 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "96fd7974-927c-499a-9221-b04af21765da" (UID: "96fd7974-927c-499a-9221-b04af21765da"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.623327 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-config" (OuterVolumeSpecName: "config") pod "96fd7974-927c-499a-9221-b04af21765da" (UID: "96fd7974-927c-499a-9221-b04af21765da"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.671489 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.671523 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qpc6c\" (UniqueName: \"kubernetes.io/projected/96fd7974-927c-499a-9221-b04af21765da-kube-api-access-qpc6c\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.671539 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.671551 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-config\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:09 crc kubenswrapper[5014]: I1006 23:00:09.671562 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/96fd7974-927c-499a-9221-b04af21765da-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:10 crc kubenswrapper[5014]: I1006 23:00:10.436885 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69864b896c-6crm5" Oct 06 23:00:10 crc kubenswrapper[5014]: I1006 23:00:10.502988 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-69864b896c-6crm5"] Oct 06 23:00:10 crc kubenswrapper[5014]: I1006 23:00:10.515971 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-69864b896c-6crm5"] Oct 06 23:00:10 crc kubenswrapper[5014]: I1006 23:00:10.862156 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.006310 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-fernet-keys\") pod \"1cf816d0-0a38-4417-91a3-be05870aa60d\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.006437 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-config-data\") pod \"1cf816d0-0a38-4417-91a3-be05870aa60d\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.006562 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-credential-keys\") pod \"1cf816d0-0a38-4417-91a3-be05870aa60d\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.006766 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-scripts\") pod \"1cf816d0-0a38-4417-91a3-be05870aa60d\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.006824 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95bwb\" (UniqueName: \"kubernetes.io/projected/1cf816d0-0a38-4417-91a3-be05870aa60d-kube-api-access-95bwb\") pod \"1cf816d0-0a38-4417-91a3-be05870aa60d\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.006863 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-combined-ca-bundle\") pod \"1cf816d0-0a38-4417-91a3-be05870aa60d\" (UID: \"1cf816d0-0a38-4417-91a3-be05870aa60d\") " Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.009435 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-scripts" (OuterVolumeSpecName: "scripts") pod "1cf816d0-0a38-4417-91a3-be05870aa60d" (UID: "1cf816d0-0a38-4417-91a3-be05870aa60d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.010176 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "1cf816d0-0a38-4417-91a3-be05870aa60d" (UID: "1cf816d0-0a38-4417-91a3-be05870aa60d"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.011525 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "1cf816d0-0a38-4417-91a3-be05870aa60d" (UID: "1cf816d0-0a38-4417-91a3-be05870aa60d"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.014525 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cf816d0-0a38-4417-91a3-be05870aa60d-kube-api-access-95bwb" (OuterVolumeSpecName: "kube-api-access-95bwb") pod "1cf816d0-0a38-4417-91a3-be05870aa60d" (UID: "1cf816d0-0a38-4417-91a3-be05870aa60d"). InnerVolumeSpecName "kube-api-access-95bwb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.044388 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-config-data" (OuterVolumeSpecName: "config-data") pod "1cf816d0-0a38-4417-91a3-be05870aa60d" (UID: "1cf816d0-0a38-4417-91a3-be05870aa60d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.049297 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1cf816d0-0a38-4417-91a3-be05870aa60d" (UID: "1cf816d0-0a38-4417-91a3-be05870aa60d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.109288 5014 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-credential-keys\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.109317 5014 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-scripts\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.109327 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95bwb\" (UniqueName: \"kubernetes.io/projected/1cf816d0-0a38-4417-91a3-be05870aa60d-kube-api-access-95bwb\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.109335 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.109352 5014 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.109362 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cf816d0-0a38-4417-91a3-be05870aa60d-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.451604 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-cjxq2" event={"ID":"1cf816d0-0a38-4417-91a3-be05870aa60d","Type":"ContainerDied","Data":"45a9a25a2c24de7be6f6385ef121b0fe626e66908432d8cafa98d716c0568e03"} Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.451713 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-cjxq2" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.451722 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45a9a25a2c24de7be6f6385ef121b0fe626e66908432d8cafa98d716c0568e03" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.501894 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96fd7974-927c-499a-9221-b04af21765da" path="/var/lib/kubelet/pods/96fd7974-927c-499a-9221-b04af21765da/volumes" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.658686 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-5c4c59f684-p8fnf"] Oct 06 23:00:11 crc kubenswrapper[5014]: E1006 23:00:11.659021 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96fd7974-927c-499a-9221-b04af21765da" containerName="dnsmasq-dns" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.659043 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="96fd7974-927c-499a-9221-b04af21765da" containerName="dnsmasq-dns" Oct 06 23:00:11 crc kubenswrapper[5014]: E1006 23:00:11.659056 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cf816d0-0a38-4417-91a3-be05870aa60d" containerName="keystone-bootstrap" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.659063 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cf816d0-0a38-4417-91a3-be05870aa60d" containerName="keystone-bootstrap" Oct 06 23:00:11 crc kubenswrapper[5014]: E1006 23:00:11.659109 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96fd7974-927c-499a-9221-b04af21765da" containerName="init" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.659118 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="96fd7974-927c-499a-9221-b04af21765da" containerName="init" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.659288 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="96fd7974-927c-499a-9221-b04af21765da" containerName="dnsmasq-dns" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.659304 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cf816d0-0a38-4417-91a3-be05870aa60d" containerName="keystone-bootstrap" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.659868 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.663385 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.663669 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.663681 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.663697 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.665003 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-q97n8" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.665318 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.692133 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5c4c59f684-p8fnf"] Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.722867 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjvlb\" (UniqueName: \"kubernetes.io/projected/05be9aa1-8c19-404f-9769-c0bf03794d09-kube-api-access-bjvlb\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.723154 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-credential-keys\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.723259 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-scripts\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.723286 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-combined-ca-bundle\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.723354 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-config-data\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.723393 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-internal-tls-certs\") pod \"keystone-5c4c59f684-p8fnf\" (UID: 
\"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.723427 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-public-tls-certs\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.723506 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-fernet-keys\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.825202 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-scripts\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.825490 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-combined-ca-bundle\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.825677 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-config-data\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.825814 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-internal-tls-certs\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.825931 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-public-tls-certs\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.826090 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-fernet-keys\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.826215 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjvlb\" (UniqueName: \"kubernetes.io/projected/05be9aa1-8c19-404f-9769-c0bf03794d09-kube-api-access-bjvlb\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" 
Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.826309 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-credential-keys\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.832166 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-scripts\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.832262 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-credential-keys\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.832272 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-internal-tls-certs\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.832769 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-fernet-keys\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.833490 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-combined-ca-bundle\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.834203 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-config-data\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.834373 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/05be9aa1-8c19-404f-9769-c0bf03794d09-public-tls-certs\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.850306 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjvlb\" (UniqueName: \"kubernetes.io/projected/05be9aa1-8c19-404f-9769-c0bf03794d09-kube-api-access-bjvlb\") pod \"keystone-5c4c59f684-p8fnf\" (UID: \"05be9aa1-8c19-404f-9769-c0bf03794d09\") " pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:11 crc kubenswrapper[5014]: I1006 23:00:11.985019 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:12 crc kubenswrapper[5014]: I1006 23:00:12.476733 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5c4c59f684-p8fnf"] Oct 06 23:00:12 crc kubenswrapper[5014]: I1006 23:00:12.485510 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 23:00:12 crc kubenswrapper[5014]: E1006 23:00:12.485838 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:00:12 crc kubenswrapper[5014]: W1006 23:00:12.487559 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod05be9aa1_8c19_404f_9769_c0bf03794d09.slice/crio-382f95e580890630eaa9929dac4690815729fa18daf2bbfb32e954a487f3c6a5 WatchSource:0}: Error finding container 382f95e580890630eaa9929dac4690815729fa18daf2bbfb32e954a487f3c6a5: Status 404 returned error can't find the container with id 382f95e580890630eaa9929dac4690815729fa18daf2bbfb32e954a487f3c6a5 Oct 06 23:00:13 crc kubenswrapper[5014]: I1006 23:00:13.475423 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5c4c59f684-p8fnf" event={"ID":"05be9aa1-8c19-404f-9769-c0bf03794d09","Type":"ContainerStarted","Data":"0250ef8e7b48ff0a400e5689159191c0ccca389cd6fc4ad885a246dd1016530f"} Oct 06 23:00:13 crc kubenswrapper[5014]: I1006 23:00:13.476991 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5c4c59f684-p8fnf" event={"ID":"05be9aa1-8c19-404f-9769-c0bf03794d09","Type":"ContainerStarted","Data":"382f95e580890630eaa9929dac4690815729fa18daf2bbfb32e954a487f3c6a5"} Oct 06 23:00:13 crc kubenswrapper[5014]: I1006 23:00:13.477169 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:13 crc kubenswrapper[5014]: I1006 23:00:13.529006 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-5c4c59f684-p8fnf" podStartSLOduration=2.528976831 podStartE2EDuration="2.528976831s" podCreationTimestamp="2025-10-06 23:00:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:00:13.512006691 +0000 UTC m=+5358.805043465" watchObservedRunningTime="2025-10-06 23:00:13.528976831 +0000 UTC m=+5358.822013595" Oct 06 23:00:14 crc kubenswrapper[5014]: I1006 23:00:14.289950 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-69864b896c-6crm5" podUID="96fd7974-927c-499a-9221-b04af21765da" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.18:5353: i/o timeout" Oct 06 23:00:25 crc kubenswrapper[5014]: I1006 23:00:25.494193 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 23:00:25 crc kubenswrapper[5014]: E1006 23:00:25.495139 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:00:40 crc kubenswrapper[5014]: I1006 23:00:40.485482 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 23:00:40 crc kubenswrapper[5014]: E1006 23:00:40.486440 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:00:43 crc kubenswrapper[5014]: I1006 23:00:43.396117 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-5c4c59f684-p8fnf" Oct 06 23:00:47 crc kubenswrapper[5014]: I1006 23:00:47.954699 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Oct 06 23:00:47 crc kubenswrapper[5014]: I1006 23:00:47.958135 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Oct 06 23:00:47 crc kubenswrapper[5014]: I1006 23:00:47.961920 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Oct 06 23:00:47 crc kubenswrapper[5014]: I1006 23:00:47.962361 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-rrlsn" Oct 06 23:00:47 crc kubenswrapper[5014]: I1006 23:00:47.962717 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Oct 06 23:00:47 crc kubenswrapper[5014]: I1006 23:00:47.973157 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.026231 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Oct 06 23:00:48 crc kubenswrapper[5014]: E1006 23:00:48.038506 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle kube-api-access-mt54g openstack-config openstack-config-secret], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/openstackclient" podUID="e50663a5-b275-457d-800f-03e9b222fefb" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.039015 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.105581 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e50663a5-b275-457d-800f-03e9b222fefb-openstack-config-secret\") pod \"openstackclient\" (UID: \"e50663a5-b275-457d-800f-03e9b222fefb\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.105703 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e50663a5-b275-457d-800f-03e9b222fefb-openstack-config\") pod \"openstackclient\" (UID: \"e50663a5-b275-457d-800f-03e9b222fefb\") " 
pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.105823 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mt54g\" (UniqueName: \"kubernetes.io/projected/e50663a5-b275-457d-800f-03e9b222fefb-kube-api-access-mt54g\") pod \"openstackclient\" (UID: \"e50663a5-b275-457d-800f-03e9b222fefb\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.105985 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e50663a5-b275-457d-800f-03e9b222fefb-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e50663a5-b275-457d-800f-03e9b222fefb\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.136197 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.137584 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.144664 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.207782 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e50663a5-b275-457d-800f-03e9b222fefb-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e50663a5-b275-457d-800f-03e9b222fefb\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.208095 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e50663a5-b275-457d-800f-03e9b222fefb-openstack-config-secret\") pod \"openstackclient\" (UID: \"e50663a5-b275-457d-800f-03e9b222fefb\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.208179 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e50663a5-b275-457d-800f-03e9b222fefb-openstack-config\") pod \"openstackclient\" (UID: \"e50663a5-b275-457d-800f-03e9b222fefb\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.208276 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mt54g\" (UniqueName: \"kubernetes.io/projected/e50663a5-b275-457d-800f-03e9b222fefb-kube-api-access-mt54g\") pod \"openstackclient\" (UID: \"e50663a5-b275-457d-800f-03e9b222fefb\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.210214 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e50663a5-b275-457d-800f-03e9b222fefb-openstack-config\") pod \"openstackclient\" (UID: \"e50663a5-b275-457d-800f-03e9b222fefb\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: E1006 23:00:48.211076 5014 projected.go:194] Error preparing data for projected volume kube-api-access-mt54g for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (e50663a5-b275-457d-800f-03e9b222fefb) does not match the UID in record. 
The object might have been deleted and then recreated Oct 06 23:00:48 crc kubenswrapper[5014]: E1006 23:00:48.211149 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e50663a5-b275-457d-800f-03e9b222fefb-kube-api-access-mt54g podName:e50663a5-b275-457d-800f-03e9b222fefb nodeName:}" failed. No retries permitted until 2025-10-06 23:00:48.711128464 +0000 UTC m=+5394.004165208 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-mt54g" (UniqueName: "kubernetes.io/projected/e50663a5-b275-457d-800f-03e9b222fefb-kube-api-access-mt54g") pod "openstackclient" (UID: "e50663a5-b275-457d-800f-03e9b222fefb") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (e50663a5-b275-457d-800f-03e9b222fefb) does not match the UID in record. The object might have been deleted and then recreated Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.213979 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e50663a5-b275-457d-800f-03e9b222fefb-openstack-config-secret\") pod \"openstackclient\" (UID: \"e50663a5-b275-457d-800f-03e9b222fefb\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.221166 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e50663a5-b275-457d-800f-03e9b222fefb-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e50663a5-b275-457d-800f-03e9b222fefb\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.309427 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af37df4c-adee-4877-83cb-35b343b6c8e7-combined-ca-bundle\") pod \"openstackclient\" (UID: \"af37df4c-adee-4877-83cb-35b343b6c8e7\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.309770 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/af37df4c-adee-4877-83cb-35b343b6c8e7-openstack-config-secret\") pod \"openstackclient\" (UID: \"af37df4c-adee-4877-83cb-35b343b6c8e7\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.309830 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lf6pk\" (UniqueName: \"kubernetes.io/projected/af37df4c-adee-4877-83cb-35b343b6c8e7-kube-api-access-lf6pk\") pod \"openstackclient\" (UID: \"af37df4c-adee-4877-83cb-35b343b6c8e7\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.309875 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/af37df4c-adee-4877-83cb-35b343b6c8e7-openstack-config\") pod \"openstackclient\" (UID: \"af37df4c-adee-4877-83cb-35b343b6c8e7\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.410869 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lf6pk\" (UniqueName: \"kubernetes.io/projected/af37df4c-adee-4877-83cb-35b343b6c8e7-kube-api-access-lf6pk\") pod \"openstackclient\" (UID: 
\"af37df4c-adee-4877-83cb-35b343b6c8e7\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.410933 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/af37df4c-adee-4877-83cb-35b343b6c8e7-openstack-config\") pod \"openstackclient\" (UID: \"af37df4c-adee-4877-83cb-35b343b6c8e7\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.411029 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af37df4c-adee-4877-83cb-35b343b6c8e7-combined-ca-bundle\") pod \"openstackclient\" (UID: \"af37df4c-adee-4877-83cb-35b343b6c8e7\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.411084 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/af37df4c-adee-4877-83cb-35b343b6c8e7-openstack-config-secret\") pod \"openstackclient\" (UID: \"af37df4c-adee-4877-83cb-35b343b6c8e7\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.412239 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/af37df4c-adee-4877-83cb-35b343b6c8e7-openstack-config\") pod \"openstackclient\" (UID: \"af37df4c-adee-4877-83cb-35b343b6c8e7\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.415800 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af37df4c-adee-4877-83cb-35b343b6c8e7-combined-ca-bundle\") pod \"openstackclient\" (UID: \"af37df4c-adee-4877-83cb-35b343b6c8e7\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.418508 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/af37df4c-adee-4877-83cb-35b343b6c8e7-openstack-config-secret\") pod \"openstackclient\" (UID: \"af37df4c-adee-4877-83cb-35b343b6c8e7\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.429124 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lf6pk\" (UniqueName: \"kubernetes.io/projected/af37df4c-adee-4877-83cb-35b343b6c8e7-kube-api-access-lf6pk\") pod \"openstackclient\" (UID: \"af37df4c-adee-4877-83cb-35b343b6c8e7\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.469491 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.716427 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mt54g\" (UniqueName: \"kubernetes.io/projected/e50663a5-b275-457d-800f-03e9b222fefb-kube-api-access-mt54g\") pod \"openstackclient\" (UID: \"e50663a5-b275-457d-800f-03e9b222fefb\") " pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: E1006 23:00:48.719707 5014 projected.go:194] Error preparing data for projected volume kube-api-access-mt54g for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (e50663a5-b275-457d-800f-03e9b222fefb) does not match the UID in record. The object might have been deleted and then recreated Oct 06 23:00:48 crc kubenswrapper[5014]: E1006 23:00:48.719769 5014 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e50663a5-b275-457d-800f-03e9b222fefb-kube-api-access-mt54g podName:e50663a5-b275-457d-800f-03e9b222fefb nodeName:}" failed. No retries permitted until 2025-10-06 23:00:49.719754505 +0000 UTC m=+5395.012791239 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-mt54g" (UniqueName: "kubernetes.io/projected/e50663a5-b275-457d-800f-03e9b222fefb-kube-api-access-mt54g") pod "openstackclient" (UID: "e50663a5-b275-457d-800f-03e9b222fefb") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (e50663a5-b275-457d-800f-03e9b222fefb) does not match the UID in record. The object might have been deleted and then recreated Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.842549 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.846754 5014 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="e50663a5-b275-457d-800f-03e9b222fefb" podUID="af37df4c-adee-4877-83cb-35b343b6c8e7" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.853542 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.922807 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e50663a5-b275-457d-800f-03e9b222fefb-openstack-config\") pod \"e50663a5-b275-457d-800f-03e9b222fefb\" (UID: \"e50663a5-b275-457d-800f-03e9b222fefb\") " Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.922979 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e50663a5-b275-457d-800f-03e9b222fefb-combined-ca-bundle\") pod \"e50663a5-b275-457d-800f-03e9b222fefb\" (UID: \"e50663a5-b275-457d-800f-03e9b222fefb\") " Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.923051 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e50663a5-b275-457d-800f-03e9b222fefb-openstack-config-secret\") pod \"e50663a5-b275-457d-800f-03e9b222fefb\" (UID: \"e50663a5-b275-457d-800f-03e9b222fefb\") " Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.923740 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mt54g\" (UniqueName: \"kubernetes.io/projected/e50663a5-b275-457d-800f-03e9b222fefb-kube-api-access-mt54g\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.924098 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e50663a5-b275-457d-800f-03e9b222fefb-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "e50663a5-b275-457d-800f-03e9b222fefb" (UID: "e50663a5-b275-457d-800f-03e9b222fefb"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.926750 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.930679 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e50663a5-b275-457d-800f-03e9b222fefb-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "e50663a5-b275-457d-800f-03e9b222fefb" (UID: "e50663a5-b275-457d-800f-03e9b222fefb"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:00:48 crc kubenswrapper[5014]: I1006 23:00:48.933947 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e50663a5-b275-457d-800f-03e9b222fefb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e50663a5-b275-457d-800f-03e9b222fefb" (UID: "e50663a5-b275-457d-800f-03e9b222fefb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:00:48 crc kubenswrapper[5014]: W1006 23:00:48.940400 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf37df4c_adee_4877_83cb_35b343b6c8e7.slice/crio-1003440e0e6967bf57407ec3c869be9e9e57d88acf63150c25367db9d17e562b WatchSource:0}: Error finding container 1003440e0e6967bf57407ec3c869be9e9e57d88acf63150c25367db9d17e562b: Status 404 returned error can't find the container with id 1003440e0e6967bf57407ec3c869be9e9e57d88acf63150c25367db9d17e562b Oct 06 23:00:49 crc kubenswrapper[5014]: I1006 23:00:49.025981 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e50663a5-b275-457d-800f-03e9b222fefb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:49 crc kubenswrapper[5014]: I1006 23:00:49.026038 5014 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e50663a5-b275-457d-800f-03e9b222fefb-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:49 crc kubenswrapper[5014]: I1006 23:00:49.026062 5014 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e50663a5-b275-457d-800f-03e9b222fefb-openstack-config\") on node \"crc\" DevicePath \"\"" Oct 06 23:00:49 crc kubenswrapper[5014]: I1006 23:00:49.503568 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e50663a5-b275-457d-800f-03e9b222fefb" path="/var/lib/kubelet/pods/e50663a5-b275-457d-800f-03e9b222fefb/volumes" Oct 06 23:00:49 crc kubenswrapper[5014]: I1006 23:00:49.875003 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Oct 06 23:00:49 crc kubenswrapper[5014]: I1006 23:00:49.875062 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"af37df4c-adee-4877-83cb-35b343b6c8e7","Type":"ContainerStarted","Data":"6c1746fd8f81fccf1cede092915905928bc73655313789f164fb1565009ea5b5"} Oct 06 23:00:49 crc kubenswrapper[5014]: I1006 23:00:49.875781 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"af37df4c-adee-4877-83cb-35b343b6c8e7","Type":"ContainerStarted","Data":"1003440e0e6967bf57407ec3c869be9e9e57d88acf63150c25367db9d17e562b"} Oct 06 23:00:49 crc kubenswrapper[5014]: I1006 23:00:49.899344 5014 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="e50663a5-b275-457d-800f-03e9b222fefb" podUID="af37df4c-adee-4877-83cb-35b343b6c8e7" Oct 06 23:00:49 crc kubenswrapper[5014]: I1006 23:00:49.906758 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.9067257020000001 podStartE2EDuration="1.906725702s" podCreationTimestamp="2025-10-06 23:00:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:00:49.895829512 +0000 UTC m=+5395.188866256" watchObservedRunningTime="2025-10-06 23:00:49.906725702 +0000 UTC m=+5395.199762486" Oct 06 23:00:53 crc kubenswrapper[5014]: I1006 23:00:53.485249 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 23:00:53 crc kubenswrapper[5014]: E1006 23:00:53.486356 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.137689 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29329861-59gbv"] Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.139815 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.161878 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29329861-59gbv"] Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.293558 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-fernet-keys\") pod \"keystone-cron-29329861-59gbv\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.293668 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-combined-ca-bundle\") pod \"keystone-cron-29329861-59gbv\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.293842 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqrx7\" (UniqueName: \"kubernetes.io/projected/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-kube-api-access-gqrx7\") pod \"keystone-cron-29329861-59gbv\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.293926 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-config-data\") pod \"keystone-cron-29329861-59gbv\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.395872 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-fernet-keys\") pod \"keystone-cron-29329861-59gbv\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.396004 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-combined-ca-bundle\") pod \"keystone-cron-29329861-59gbv\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.396136 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqrx7\" (UniqueName: \"kubernetes.io/projected/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-kube-api-access-gqrx7\") pod \"keystone-cron-29329861-59gbv\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.396258 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-config-data\") pod \"keystone-cron-29329861-59gbv\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.410736 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-fernet-keys\") pod \"keystone-cron-29329861-59gbv\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.411975 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-combined-ca-bundle\") pod \"keystone-cron-29329861-59gbv\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.412298 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-config-data\") pod \"keystone-cron-29329861-59gbv\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.428918 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqrx7\" (UniqueName: \"kubernetes.io/projected/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-kube-api-access-gqrx7\") pod \"keystone-cron-29329861-59gbv\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.462357 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.954147 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29329861-59gbv"] Oct 06 23:01:00 crc kubenswrapper[5014]: W1006 23:01:00.957426 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5ae5cdba_2926_4eb6_ba70_1c6b65be53f6.slice/crio-eb5d63d912bfd79871af0ff51dc963055c5a5a4a826930822f5a6d9b7d3c7a00 WatchSource:0}: Error finding container eb5d63d912bfd79871af0ff51dc963055c5a5a4a826930822f5a6d9b7d3c7a00: Status 404 returned error can't find the container with id eb5d63d912bfd79871af0ff51dc963055c5a5a4a826930822f5a6d9b7d3c7a00 Oct 06 23:01:00 crc kubenswrapper[5014]: I1006 23:01:00.998604 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29329861-59gbv" event={"ID":"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6","Type":"ContainerStarted","Data":"eb5d63d912bfd79871af0ff51dc963055c5a5a4a826930822f5a6d9b7d3c7a00"} Oct 06 23:01:02 crc kubenswrapper[5014]: I1006 23:01:02.011096 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29329861-59gbv" event={"ID":"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6","Type":"ContainerStarted","Data":"0c5c9a703ae7dedfc78ccbf0698f5dd8d8c70a9a7f8dc10319af74aab1be7c14"} Oct 06 23:01:02 crc kubenswrapper[5014]: I1006 23:01:02.034672 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29329861-59gbv" podStartSLOduration=2.034614074 podStartE2EDuration="2.034614074s" podCreationTimestamp="2025-10-06 23:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:01:02.031158566 +0000 UTC m=+5407.324195340" watchObservedRunningTime="2025-10-06 23:01:02.034614074 +0000 UTC m=+5407.327650838" Oct 06 23:01:03 crc kubenswrapper[5014]: I1006 23:01:03.024999 5014 
generic.go:334] "Generic (PLEG): container finished" podID="5ae5cdba-2926-4eb6-ba70-1c6b65be53f6" containerID="0c5c9a703ae7dedfc78ccbf0698f5dd8d8c70a9a7f8dc10319af74aab1be7c14" exitCode=0 Oct 06 23:01:03 crc kubenswrapper[5014]: I1006 23:01:03.025110 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29329861-59gbv" event={"ID":"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6","Type":"ContainerDied","Data":"0c5c9a703ae7dedfc78ccbf0698f5dd8d8c70a9a7f8dc10319af74aab1be7c14"} Oct 06 23:01:03 crc kubenswrapper[5014]: I1006 23:01:03.346383 5014 scope.go:117] "RemoveContainer" containerID="af4106729a78544128dadec8d86d2d5546afd1b211c4bb828e918d98cd1157cf" Oct 06 23:01:03 crc kubenswrapper[5014]: I1006 23:01:03.390161 5014 scope.go:117] "RemoveContainer" containerID="72c52a6401df1c76feeac8a62b43e0dff7c8b06dbb2b59d2e4fac76d33a441a0" Oct 06 23:01:03 crc kubenswrapper[5014]: I1006 23:01:03.428530 5014 scope.go:117] "RemoveContainer" containerID="bfc96154553144da4a7277d75d34655ef9da34eb76310f80b6b678fc49d4902b" Oct 06 23:01:03 crc kubenswrapper[5014]: I1006 23:01:03.474385 5014 scope.go:117] "RemoveContainer" containerID="95156c492eca6aa029a21746cfc24747b7de0d0b8aad42dea075ec296343e731" Oct 06 23:01:03 crc kubenswrapper[5014]: I1006 23:01:03.527351 5014 scope.go:117] "RemoveContainer" containerID="209526715e7f712200631547f00e59c0f67a1afabd212fd19b7e394add05073d" Oct 06 23:01:03 crc kubenswrapper[5014]: I1006 23:01:03.567570 5014 scope.go:117] "RemoveContainer" containerID="c5f6328fd1274d2918237fbf91acbfebe8e5f40d9f7a3f160048bfc5ab3529f2" Oct 06 23:01:03 crc kubenswrapper[5014]: I1006 23:01:03.602387 5014 scope.go:117] "RemoveContainer" containerID="1b6d9e60a4d5cd0c545344dd95387e69e0a6def9295eddbeeb2ac73004266c92" Oct 06 23:01:03 crc kubenswrapper[5014]: I1006 23:01:03.621998 5014 scope.go:117] "RemoveContainer" containerID="66db457509f91e6efa3a78d6013576053423c7db985ff0315d453497182e2549" Oct 06 23:01:04 crc kubenswrapper[5014]: I1006 23:01:04.326114 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:04 crc kubenswrapper[5014]: I1006 23:01:04.509112 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqrx7\" (UniqueName: \"kubernetes.io/projected/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-kube-api-access-gqrx7\") pod \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " Oct 06 23:01:04 crc kubenswrapper[5014]: I1006 23:01:04.510339 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-config-data\") pod \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " Oct 06 23:01:04 crc kubenswrapper[5014]: I1006 23:01:04.510456 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-fernet-keys\") pod \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " Oct 06 23:01:04 crc kubenswrapper[5014]: I1006 23:01:04.510481 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-combined-ca-bundle\") pod \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\" (UID: \"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6\") " Oct 06 23:01:04 crc kubenswrapper[5014]: I1006 23:01:04.519050 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-kube-api-access-gqrx7" (OuterVolumeSpecName: "kube-api-access-gqrx7") pod "5ae5cdba-2926-4eb6-ba70-1c6b65be53f6" (UID: "5ae5cdba-2926-4eb6-ba70-1c6b65be53f6"). InnerVolumeSpecName "kube-api-access-gqrx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:01:04 crc kubenswrapper[5014]: I1006 23:01:04.524018 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5ae5cdba-2926-4eb6-ba70-1c6b65be53f6" (UID: "5ae5cdba-2926-4eb6-ba70-1c6b65be53f6"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:01:04 crc kubenswrapper[5014]: I1006 23:01:04.568816 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ae5cdba-2926-4eb6-ba70-1c6b65be53f6" (UID: "5ae5cdba-2926-4eb6-ba70-1c6b65be53f6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:01:04 crc kubenswrapper[5014]: I1006 23:01:04.576333 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-config-data" (OuterVolumeSpecName: "config-data") pod "5ae5cdba-2926-4eb6-ba70-1c6b65be53f6" (UID: "5ae5cdba-2926-4eb6-ba70-1c6b65be53f6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:01:04 crc kubenswrapper[5014]: I1006 23:01:04.612792 5014 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 06 23:01:04 crc kubenswrapper[5014]: I1006 23:01:04.612849 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 23:01:04 crc kubenswrapper[5014]: I1006 23:01:04.612871 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqrx7\" (UniqueName: \"kubernetes.io/projected/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-kube-api-access-gqrx7\") on node \"crc\" DevicePath \"\"" Oct 06 23:01:04 crc kubenswrapper[5014]: I1006 23:01:04.612891 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ae5cdba-2926-4eb6-ba70-1c6b65be53f6-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 23:01:05 crc kubenswrapper[5014]: I1006 23:01:05.066834 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29329861-59gbv" event={"ID":"5ae5cdba-2926-4eb6-ba70-1c6b65be53f6","Type":"ContainerDied","Data":"eb5d63d912bfd79871af0ff51dc963055c5a5a4a826930822f5a6d9b7d3c7a00"} Oct 06 23:01:05 crc kubenswrapper[5014]: I1006 23:01:05.066893 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb5d63d912bfd79871af0ff51dc963055c5a5a4a826930822f5a6d9b7d3c7a00" Oct 06 23:01:05 crc kubenswrapper[5014]: I1006 23:01:05.066947 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29329861-59gbv" Oct 06 23:01:06 crc kubenswrapper[5014]: I1006 23:01:06.484709 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 23:01:06 crc kubenswrapper[5014]: E1006 23:01:06.485329 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:01:21 crc kubenswrapper[5014]: I1006 23:01:21.485295 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 23:01:21 crc kubenswrapper[5014]: E1006 23:01:21.485989 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:01:36 crc kubenswrapper[5014]: I1006 23:01:36.484443 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 23:01:36 crc kubenswrapper[5014]: E1006 23:01:36.485363 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:01:51 crc kubenswrapper[5014]: I1006 23:01:51.485276 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 23:01:51 crc kubenswrapper[5014]: E1006 23:01:51.486593 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:02:03 crc kubenswrapper[5014]: I1006 23:02:03.484870 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 23:02:03 crc kubenswrapper[5014]: E1006 23:02:03.486253 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:02:18 crc kubenswrapper[5014]: I1006 23:02:18.484823 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 23:02:18 crc kubenswrapper[5014]: E1006 23:02:18.486286 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:02:24 crc kubenswrapper[5014]: I1006 23:02:24.343819 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-ftfn9"] Oct 06 23:02:24 crc kubenswrapper[5014]: E1006 23:02:24.345029 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ae5cdba-2926-4eb6-ba70-1c6b65be53f6" containerName="keystone-cron" Oct 06 23:02:24 crc kubenswrapper[5014]: I1006 23:02:24.345046 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ae5cdba-2926-4eb6-ba70-1c6b65be53f6" containerName="keystone-cron" Oct 06 23:02:24 crc kubenswrapper[5014]: I1006 23:02:24.345264 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ae5cdba-2926-4eb6-ba70-1c6b65be53f6" containerName="keystone-cron" Oct 06 23:02:24 crc kubenswrapper[5014]: I1006 23:02:24.345971 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-ftfn9" Oct 06 23:02:24 crc kubenswrapper[5014]: I1006 23:02:24.353172 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-ftfn9"] Oct 06 23:02:24 crc kubenswrapper[5014]: I1006 23:02:24.486816 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xf6km\" (UniqueName: \"kubernetes.io/projected/af485524-a88f-45e0-ac5a-73a702c8ab82-kube-api-access-xf6km\") pod \"barbican-db-create-ftfn9\" (UID: \"af485524-a88f-45e0-ac5a-73a702c8ab82\") " pod="openstack/barbican-db-create-ftfn9" Oct 06 23:02:24 crc kubenswrapper[5014]: I1006 23:02:24.588777 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xf6km\" (UniqueName: \"kubernetes.io/projected/af485524-a88f-45e0-ac5a-73a702c8ab82-kube-api-access-xf6km\") pod \"barbican-db-create-ftfn9\" (UID: \"af485524-a88f-45e0-ac5a-73a702c8ab82\") " pod="openstack/barbican-db-create-ftfn9" Oct 06 23:02:24 crc kubenswrapper[5014]: I1006 23:02:24.618166 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xf6km\" (UniqueName: \"kubernetes.io/projected/af485524-a88f-45e0-ac5a-73a702c8ab82-kube-api-access-xf6km\") pod \"barbican-db-create-ftfn9\" (UID: \"af485524-a88f-45e0-ac5a-73a702c8ab82\") " pod="openstack/barbican-db-create-ftfn9" Oct 06 23:02:24 crc kubenswrapper[5014]: I1006 23:02:24.715031 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-ftfn9" Oct 06 23:02:25 crc kubenswrapper[5014]: I1006 23:02:25.181771 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-ftfn9"] Oct 06 23:02:25 crc kubenswrapper[5014]: W1006 23:02:25.185702 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf485524_a88f_45e0_ac5a_73a702c8ab82.slice/crio-d3bde6fd64ba59e4d30ce94258ae4d81e517caf956665958913a048a4ce8ea5d WatchSource:0}: Error finding container d3bde6fd64ba59e4d30ce94258ae4d81e517caf956665958913a048a4ce8ea5d: Status 404 returned error can't find the container with id d3bde6fd64ba59e4d30ce94258ae4d81e517caf956665958913a048a4ce8ea5d Oct 06 23:02:25 crc kubenswrapper[5014]: I1006 23:02:25.932803 5014 generic.go:334] "Generic (PLEG): container finished" podID="af485524-a88f-45e0-ac5a-73a702c8ab82" containerID="324f4111201344d9e3644e98f0c0578b53a1a7e9be298e2449cfeafb24fc4b5e" exitCode=0 Oct 06 23:02:25 crc kubenswrapper[5014]: I1006 23:02:25.932917 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-ftfn9" event={"ID":"af485524-a88f-45e0-ac5a-73a702c8ab82","Type":"ContainerDied","Data":"324f4111201344d9e3644e98f0c0578b53a1a7e9be298e2449cfeafb24fc4b5e"} Oct 06 23:02:25 crc kubenswrapper[5014]: I1006 23:02:25.933762 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-ftfn9" event={"ID":"af485524-a88f-45e0-ac5a-73a702c8ab82","Type":"ContainerStarted","Data":"d3bde6fd64ba59e4d30ce94258ae4d81e517caf956665958913a048a4ce8ea5d"} Oct 06 23:02:27 crc kubenswrapper[5014]: I1006 23:02:27.374976 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-ftfn9" Oct 06 23:02:27 crc kubenswrapper[5014]: I1006 23:02:27.545748 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xf6km\" (UniqueName: \"kubernetes.io/projected/af485524-a88f-45e0-ac5a-73a702c8ab82-kube-api-access-xf6km\") pod \"af485524-a88f-45e0-ac5a-73a702c8ab82\" (UID: \"af485524-a88f-45e0-ac5a-73a702c8ab82\") " Oct 06 23:02:27 crc kubenswrapper[5014]: I1006 23:02:27.554818 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af485524-a88f-45e0-ac5a-73a702c8ab82-kube-api-access-xf6km" (OuterVolumeSpecName: "kube-api-access-xf6km") pod "af485524-a88f-45e0-ac5a-73a702c8ab82" (UID: "af485524-a88f-45e0-ac5a-73a702c8ab82"). InnerVolumeSpecName "kube-api-access-xf6km". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:02:27 crc kubenswrapper[5014]: I1006 23:02:27.648374 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xf6km\" (UniqueName: \"kubernetes.io/projected/af485524-a88f-45e0-ac5a-73a702c8ab82-kube-api-access-xf6km\") on node \"crc\" DevicePath \"\"" Oct 06 23:02:27 crc kubenswrapper[5014]: I1006 23:02:27.960172 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-ftfn9" event={"ID":"af485524-a88f-45e0-ac5a-73a702c8ab82","Type":"ContainerDied","Data":"d3bde6fd64ba59e4d30ce94258ae4d81e517caf956665958913a048a4ce8ea5d"} Oct 06 23:02:27 crc kubenswrapper[5014]: I1006 23:02:27.960226 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d3bde6fd64ba59e4d30ce94258ae4d81e517caf956665958913a048a4ce8ea5d" Oct 06 23:02:27 crc kubenswrapper[5014]: I1006 23:02:27.960713 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-ftfn9" Oct 06 23:02:31 crc kubenswrapper[5014]: I1006 23:02:31.485236 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 23:02:31 crc kubenswrapper[5014]: E1006 23:02:31.486003 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:02:34 crc kubenswrapper[5014]: I1006 23:02:34.368930 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-41d5-account-create-gfwn5"] Oct 06 23:02:34 crc kubenswrapper[5014]: E1006 23:02:34.369990 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af485524-a88f-45e0-ac5a-73a702c8ab82" containerName="mariadb-database-create" Oct 06 23:02:34 crc kubenswrapper[5014]: I1006 23:02:34.370004 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="af485524-a88f-45e0-ac5a-73a702c8ab82" containerName="mariadb-database-create" Oct 06 23:02:34 crc kubenswrapper[5014]: I1006 23:02:34.370415 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="af485524-a88f-45e0-ac5a-73a702c8ab82" containerName="mariadb-database-create" Oct 06 23:02:34 crc kubenswrapper[5014]: I1006 23:02:34.371331 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-41d5-account-create-gfwn5" Oct 06 23:02:34 crc kubenswrapper[5014]: I1006 23:02:34.373929 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Oct 06 23:02:34 crc kubenswrapper[5014]: I1006 23:02:34.404906 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-41d5-account-create-gfwn5"] Oct 06 23:02:34 crc kubenswrapper[5014]: I1006 23:02:34.495750 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zx6r\" (UniqueName: \"kubernetes.io/projected/7f71f2dc-8f61-41db-81bb-929ab9e4676e-kube-api-access-7zx6r\") pod \"barbican-41d5-account-create-gfwn5\" (UID: \"7f71f2dc-8f61-41db-81bb-929ab9e4676e\") " pod="openstack/barbican-41d5-account-create-gfwn5" Oct 06 23:02:34 crc kubenswrapper[5014]: I1006 23:02:34.597360 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zx6r\" (UniqueName: \"kubernetes.io/projected/7f71f2dc-8f61-41db-81bb-929ab9e4676e-kube-api-access-7zx6r\") pod \"barbican-41d5-account-create-gfwn5\" (UID: \"7f71f2dc-8f61-41db-81bb-929ab9e4676e\") " pod="openstack/barbican-41d5-account-create-gfwn5" Oct 06 23:02:34 crc kubenswrapper[5014]: I1006 23:02:34.634058 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zx6r\" (UniqueName: \"kubernetes.io/projected/7f71f2dc-8f61-41db-81bb-929ab9e4676e-kube-api-access-7zx6r\") pod \"barbican-41d5-account-create-gfwn5\" (UID: \"7f71f2dc-8f61-41db-81bb-929ab9e4676e\") " pod="openstack/barbican-41d5-account-create-gfwn5" Oct 06 23:02:34 crc kubenswrapper[5014]: I1006 23:02:34.709394 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-41d5-account-create-gfwn5" Oct 06 23:02:35 crc kubenswrapper[5014]: I1006 23:02:35.097146 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-41d5-account-create-gfwn5"] Oct 06 23:02:36 crc kubenswrapper[5014]: I1006 23:02:36.064465 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-41d5-account-create-gfwn5" event={"ID":"7f71f2dc-8f61-41db-81bb-929ab9e4676e","Type":"ContainerDied","Data":"4add3fbee4252b760fa319bfaa595969c5a1c2445c40eb0cae5def0cc8204539"} Oct 06 23:02:36 crc kubenswrapper[5014]: I1006 23:02:36.064253 5014 generic.go:334] "Generic (PLEG): container finished" podID="7f71f2dc-8f61-41db-81bb-929ab9e4676e" containerID="4add3fbee4252b760fa319bfaa595969c5a1c2445c40eb0cae5def0cc8204539" exitCode=0 Oct 06 23:02:36 crc kubenswrapper[5014]: I1006 23:02:36.064968 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-41d5-account-create-gfwn5" event={"ID":"7f71f2dc-8f61-41db-81bb-929ab9e4676e","Type":"ContainerStarted","Data":"a7e1330651613dc423eb5a6344c445ceff398c0247f4e2fe2a67d45579379285"} Oct 06 23:02:37 crc kubenswrapper[5014]: I1006 23:02:37.512798 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-41d5-account-create-gfwn5" Oct 06 23:02:37 crc kubenswrapper[5014]: I1006 23:02:37.551183 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zx6r\" (UniqueName: \"kubernetes.io/projected/7f71f2dc-8f61-41db-81bb-929ab9e4676e-kube-api-access-7zx6r\") pod \"7f71f2dc-8f61-41db-81bb-929ab9e4676e\" (UID: \"7f71f2dc-8f61-41db-81bb-929ab9e4676e\") " Oct 06 23:02:37 crc kubenswrapper[5014]: I1006 23:02:37.561926 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f71f2dc-8f61-41db-81bb-929ab9e4676e-kube-api-access-7zx6r" (OuterVolumeSpecName: "kube-api-access-7zx6r") pod "7f71f2dc-8f61-41db-81bb-929ab9e4676e" (UID: "7f71f2dc-8f61-41db-81bb-929ab9e4676e"). InnerVolumeSpecName "kube-api-access-7zx6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:02:37 crc kubenswrapper[5014]: I1006 23:02:37.652408 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zx6r\" (UniqueName: \"kubernetes.io/projected/7f71f2dc-8f61-41db-81bb-929ab9e4676e-kube-api-access-7zx6r\") on node \"crc\" DevicePath \"\"" Oct 06 23:02:38 crc kubenswrapper[5014]: I1006 23:02:38.088921 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-41d5-account-create-gfwn5" event={"ID":"7f71f2dc-8f61-41db-81bb-929ab9e4676e","Type":"ContainerDied","Data":"a7e1330651613dc423eb5a6344c445ceff398c0247f4e2fe2a67d45579379285"} Oct 06 23:02:38 crc kubenswrapper[5014]: I1006 23:02:38.088982 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a7e1330651613dc423eb5a6344c445ceff398c0247f4e2fe2a67d45579379285" Oct 06 23:02:38 crc kubenswrapper[5014]: I1006 23:02:38.089069 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-41d5-account-create-gfwn5" Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.688924 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-sg4w2"] Oct 06 23:02:39 crc kubenswrapper[5014]: E1006 23:02:39.689764 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f71f2dc-8f61-41db-81bb-929ab9e4676e" containerName="mariadb-account-create" Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.689786 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f71f2dc-8f61-41db-81bb-929ab9e4676e" containerName="mariadb-account-create" Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.690113 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f71f2dc-8f61-41db-81bb-929ab9e4676e" containerName="mariadb-account-create" Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.691031 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-sg4w2" Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.693072 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4zf4\" (UniqueName: \"kubernetes.io/projected/0407dde2-0230-4190-bf9b-75d194fbf559-kube-api-access-w4zf4\") pod \"barbican-db-sync-sg4w2\" (UID: \"0407dde2-0230-4190-bf9b-75d194fbf559\") " pod="openstack/barbican-db-sync-sg4w2" Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.693207 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0407dde2-0230-4190-bf9b-75d194fbf559-db-sync-config-data\") pod \"barbican-db-sync-sg4w2\" (UID: \"0407dde2-0230-4190-bf9b-75d194fbf559\") " pod="openstack/barbican-db-sync-sg4w2" Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.693340 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0407dde2-0230-4190-bf9b-75d194fbf559-combined-ca-bundle\") pod \"barbican-db-sync-sg4w2\" (UID: \"0407dde2-0230-4190-bf9b-75d194fbf559\") " pod="openstack/barbican-db-sync-sg4w2" Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.694328 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.695186 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-xzcwd" Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.711915 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-sg4w2"] Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.795692 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4zf4\" (UniqueName: \"kubernetes.io/projected/0407dde2-0230-4190-bf9b-75d194fbf559-kube-api-access-w4zf4\") pod \"barbican-db-sync-sg4w2\" (UID: \"0407dde2-0230-4190-bf9b-75d194fbf559\") " pod="openstack/barbican-db-sync-sg4w2" Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.795782 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0407dde2-0230-4190-bf9b-75d194fbf559-db-sync-config-data\") pod \"barbican-db-sync-sg4w2\" (UID: \"0407dde2-0230-4190-bf9b-75d194fbf559\") " pod="openstack/barbican-db-sync-sg4w2" Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.795856 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0407dde2-0230-4190-bf9b-75d194fbf559-combined-ca-bundle\") pod \"barbican-db-sync-sg4w2\" (UID: \"0407dde2-0230-4190-bf9b-75d194fbf559\") " pod="openstack/barbican-db-sync-sg4w2" Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.804870 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0407dde2-0230-4190-bf9b-75d194fbf559-db-sync-config-data\") pod \"barbican-db-sync-sg4w2\" (UID: \"0407dde2-0230-4190-bf9b-75d194fbf559\") " pod="openstack/barbican-db-sync-sg4w2" Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.808109 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0407dde2-0230-4190-bf9b-75d194fbf559-combined-ca-bundle\") pod \"barbican-db-sync-sg4w2\" (UID: \"0407dde2-0230-4190-bf9b-75d194fbf559\") " pod="openstack/barbican-db-sync-sg4w2" Oct 06 23:02:39 crc kubenswrapper[5014]: I1006 23:02:39.823249 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4zf4\" (UniqueName: \"kubernetes.io/projected/0407dde2-0230-4190-bf9b-75d194fbf559-kube-api-access-w4zf4\") pod \"barbican-db-sync-sg4w2\" (UID: \"0407dde2-0230-4190-bf9b-75d194fbf559\") " pod="openstack/barbican-db-sync-sg4w2" Oct 06 23:02:40 crc kubenswrapper[5014]: I1006 23:02:40.020278 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-sg4w2" Oct 06 23:02:40 crc kubenswrapper[5014]: I1006 23:02:40.518139 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-sg4w2"] Oct 06 23:02:41 crc kubenswrapper[5014]: I1006 23:02:41.121973 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sg4w2" event={"ID":"0407dde2-0230-4190-bf9b-75d194fbf559","Type":"ContainerStarted","Data":"d4db18cc151146afdf00c862d11232133326efa714d62270fb22727224d54e0d"} Oct 06 23:02:41 crc kubenswrapper[5014]: I1006 23:02:41.122374 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sg4w2" event={"ID":"0407dde2-0230-4190-bf9b-75d194fbf559","Type":"ContainerStarted","Data":"bd2dfbf476fb9185198ab50d159937b17108dce60ebc0a29f142c976ea071e4c"} Oct 06 23:02:41 crc kubenswrapper[5014]: I1006 23:02:41.147133 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-sg4w2" podStartSLOduration=2.147111817 podStartE2EDuration="2.147111817s" podCreationTimestamp="2025-10-06 23:02:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:02:41.145240068 +0000 UTC m=+5506.438276812" watchObservedRunningTime="2025-10-06 23:02:41.147111817 +0000 UTC m=+5506.440148591" Oct 06 23:02:43 crc kubenswrapper[5014]: I1006 23:02:43.150037 5014 generic.go:334] "Generic (PLEG): container finished" podID="0407dde2-0230-4190-bf9b-75d194fbf559" containerID="d4db18cc151146afdf00c862d11232133326efa714d62270fb22727224d54e0d" exitCode=0 Oct 06 23:02:43 crc kubenswrapper[5014]: I1006 23:02:43.150103 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sg4w2" event={"ID":"0407dde2-0230-4190-bf9b-75d194fbf559","Type":"ContainerDied","Data":"d4db18cc151146afdf00c862d11232133326efa714d62270fb22727224d54e0d"} Oct 06 23:02:44 crc kubenswrapper[5014]: I1006 23:02:44.607240 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-sg4w2" Oct 06 23:02:44 crc kubenswrapper[5014]: I1006 23:02:44.711044 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0407dde2-0230-4190-bf9b-75d194fbf559-combined-ca-bundle\") pod \"0407dde2-0230-4190-bf9b-75d194fbf559\" (UID: \"0407dde2-0230-4190-bf9b-75d194fbf559\") " Oct 06 23:02:44 crc kubenswrapper[5014]: I1006 23:02:44.711285 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0407dde2-0230-4190-bf9b-75d194fbf559-db-sync-config-data\") pod \"0407dde2-0230-4190-bf9b-75d194fbf559\" (UID: \"0407dde2-0230-4190-bf9b-75d194fbf559\") " Oct 06 23:02:44 crc kubenswrapper[5014]: I1006 23:02:44.711328 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4zf4\" (UniqueName: \"kubernetes.io/projected/0407dde2-0230-4190-bf9b-75d194fbf559-kube-api-access-w4zf4\") pod \"0407dde2-0230-4190-bf9b-75d194fbf559\" (UID: \"0407dde2-0230-4190-bf9b-75d194fbf559\") " Oct 06 23:02:44 crc kubenswrapper[5014]: I1006 23:02:44.718739 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0407dde2-0230-4190-bf9b-75d194fbf559-kube-api-access-w4zf4" (OuterVolumeSpecName: "kube-api-access-w4zf4") pod "0407dde2-0230-4190-bf9b-75d194fbf559" (UID: "0407dde2-0230-4190-bf9b-75d194fbf559"). InnerVolumeSpecName "kube-api-access-w4zf4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:02:44 crc kubenswrapper[5014]: I1006 23:02:44.718754 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0407dde2-0230-4190-bf9b-75d194fbf559-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "0407dde2-0230-4190-bf9b-75d194fbf559" (UID: "0407dde2-0230-4190-bf9b-75d194fbf559"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:02:44 crc kubenswrapper[5014]: I1006 23:02:44.736840 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0407dde2-0230-4190-bf9b-75d194fbf559-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0407dde2-0230-4190-bf9b-75d194fbf559" (UID: "0407dde2-0230-4190-bf9b-75d194fbf559"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:02:44 crc kubenswrapper[5014]: I1006 23:02:44.814097 5014 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0407dde2-0230-4190-bf9b-75d194fbf559-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 23:02:44 crc kubenswrapper[5014]: I1006 23:02:44.814163 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4zf4\" (UniqueName: \"kubernetes.io/projected/0407dde2-0230-4190-bf9b-75d194fbf559-kube-api-access-w4zf4\") on node \"crc\" DevicePath \"\"" Oct 06 23:02:44 crc kubenswrapper[5014]: I1006 23:02:44.814194 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0407dde2-0230-4190-bf9b-75d194fbf559-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.178231 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sg4w2" event={"ID":"0407dde2-0230-4190-bf9b-75d194fbf559","Type":"ContainerDied","Data":"bd2dfbf476fb9185198ab50d159937b17108dce60ebc0a29f142c976ea071e4c"} Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.178292 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd2dfbf476fb9185198ab50d159937b17108dce60ebc0a29f142c976ea071e4c" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.178349 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-sg4w2" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.478898 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-79d6c6fc67-tshsj"] Oct 06 23:02:45 crc kubenswrapper[5014]: E1006 23:02:45.479457 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0407dde2-0230-4190-bf9b-75d194fbf559" containerName="barbican-db-sync" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.479472 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="0407dde2-0230-4190-bf9b-75d194fbf559" containerName="barbican-db-sync" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.479830 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="0407dde2-0230-4190-bf9b-75d194fbf559" containerName="barbican-db-sync" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.480651 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-79d6c6fc67-tshsj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.484589 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.484846 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-xzcwd" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.484976 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.517279 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-7bddb697b-t7597"] Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.521537 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.531667 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.532111 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-79d6c6fc67-tshsj"] Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.533585 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e90d3a14-2789-4684-998e-c392eddebbb6-config-data\") pod \"barbican-worker-79d6c6fc67-tshsj\" (UID: \"e90d3a14-2789-4684-998e-c392eddebbb6\") " pod="openstack/barbican-worker-79d6c6fc67-tshsj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.533665 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ndbn\" (UniqueName: \"kubernetes.io/projected/e90d3a14-2789-4684-998e-c392eddebbb6-kube-api-access-4ndbn\") pod \"barbican-worker-79d6c6fc67-tshsj\" (UID: \"e90d3a14-2789-4684-998e-c392eddebbb6\") " pod="openstack/barbican-worker-79d6c6fc67-tshsj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.533704 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e90d3a14-2789-4684-998e-c392eddebbb6-logs\") pod \"barbican-worker-79d6c6fc67-tshsj\" (UID: \"e90d3a14-2789-4684-998e-c392eddebbb6\") " pod="openstack/barbican-worker-79d6c6fc67-tshsj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.533728 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9940507-c903-4972-8914-2727614b0e7c-logs\") pod \"barbican-keystone-listener-7bddb697b-t7597\" (UID: \"d9940507-c903-4972-8914-2727614b0e7c\") " pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.533745 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9940507-c903-4972-8914-2727614b0e7c-config-data\") pod \"barbican-keystone-listener-7bddb697b-t7597\" (UID: \"d9940507-c903-4972-8914-2727614b0e7c\") " pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.533769 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-899lr\" (UniqueName: \"kubernetes.io/projected/d9940507-c903-4972-8914-2727614b0e7c-kube-api-access-899lr\") pod \"barbican-keystone-listener-7bddb697b-t7597\" (UID: \"d9940507-c903-4972-8914-2727614b0e7c\") " pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.533809 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9940507-c903-4972-8914-2727614b0e7c-combined-ca-bundle\") pod \"barbican-keystone-listener-7bddb697b-t7597\" (UID: \"d9940507-c903-4972-8914-2727614b0e7c\") " pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.533835 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9940507-c903-4972-8914-2727614b0e7c-config-data-custom\") pod \"barbican-keystone-listener-7bddb697b-t7597\" (UID: \"d9940507-c903-4972-8914-2727614b0e7c\") " pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.533855 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e90d3a14-2789-4684-998e-c392eddebbb6-combined-ca-bundle\") pod \"barbican-worker-79d6c6fc67-tshsj\" (UID: \"e90d3a14-2789-4684-998e-c392eddebbb6\") " pod="openstack/barbican-worker-79d6c6fc67-tshsj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.533874 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e90d3a14-2789-4684-998e-c392eddebbb6-config-data-custom\") pod \"barbican-worker-79d6c6fc67-tshsj\" (UID: \"e90d3a14-2789-4684-998e-c392eddebbb6\") " pod="openstack/barbican-worker-79d6c6fc67-tshsj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.553907 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7bddb697b-t7597"] Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.579040 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-c5b46b9d5-5v5cj"] Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.582019 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.610271 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c5b46b9d5-5v5cj"] Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.635904 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9940507-c903-4972-8914-2727614b0e7c-config-data\") pod \"barbican-keystone-listener-7bddb697b-t7597\" (UID: \"d9940507-c903-4972-8914-2727614b0e7c\") " pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.635953 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-config\") pod \"dnsmasq-dns-c5b46b9d5-5v5cj\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.635981 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-899lr\" (UniqueName: \"kubernetes.io/projected/d9940507-c903-4972-8914-2727614b0e7c-kube-api-access-899lr\") pod \"barbican-keystone-listener-7bddb697b-t7597\" (UID: \"d9940507-c903-4972-8914-2727614b0e7c\") " pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.636006 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9940507-c903-4972-8914-2727614b0e7c-combined-ca-bundle\") pod \"barbican-keystone-listener-7bddb697b-t7597\" (UID: \"d9940507-c903-4972-8914-2727614b0e7c\") " pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.636029 5014 
Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.636050 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9940507-c903-4972-8914-2727614b0e7c-config-data-custom\") pod \"barbican-keystone-listener-7bddb697b-t7597\" (UID: \"d9940507-c903-4972-8914-2727614b0e7c\") " pod="openstack/barbican-keystone-listener-7bddb697b-t7597"
Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.636070 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e90d3a14-2789-4684-998e-c392eddebbb6-combined-ca-bundle\") pod \"barbican-worker-79d6c6fc67-tshsj\" (UID: \"e90d3a14-2789-4684-998e-c392eddebbb6\") " pod="openstack/barbican-worker-79d6c6fc67-tshsj"
Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.636089 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e90d3a14-2789-4684-998e-c392eddebbb6-config-data-custom\") pod \"barbican-worker-79d6c6fc67-tshsj\" (UID: \"e90d3a14-2789-4684-998e-c392eddebbb6\") " pod="openstack/barbican-worker-79d6c6fc67-tshsj"
Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.636129 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e90d3a14-2789-4684-998e-c392eddebbb6-config-data\") pod \"barbican-worker-79d6c6fc67-tshsj\" (UID: \"e90d3a14-2789-4684-998e-c392eddebbb6\") " pod="openstack/barbican-worker-79d6c6fc67-tshsj"
Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.636163 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-ovsdbserver-sb\") pod \"dnsmasq-dns-c5b46b9d5-5v5cj\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj"
Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.636184 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-ovsdbserver-nb\") pod \"dnsmasq-dns-c5b46b9d5-5v5cj\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj"
Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.636211 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ndbn\" (UniqueName: \"kubernetes.io/projected/e90d3a14-2789-4684-998e-c392eddebbb6-kube-api-access-4ndbn\") pod \"barbican-worker-79d6c6fc67-tshsj\" (UID: \"e90d3a14-2789-4684-998e-c392eddebbb6\") " pod="openstack/barbican-worker-79d6c6fc67-tshsj"
Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.636242 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e90d3a14-2789-4684-998e-c392eddebbb6-logs\") pod \"barbican-worker-79d6c6fc67-tshsj\" (UID: \"e90d3a14-2789-4684-998e-c392eddebbb6\") " pod="openstack/barbican-worker-79d6c6fc67-tshsj"
pod="openstack/barbican-worker-79d6c6fc67-tshsj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.636267 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-dns-svc\") pod \"dnsmasq-dns-c5b46b9d5-5v5cj\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.636302 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9940507-c903-4972-8914-2727614b0e7c-logs\") pod \"barbican-keystone-listener-7bddb697b-t7597\" (UID: \"d9940507-c903-4972-8914-2727614b0e7c\") " pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.636801 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9940507-c903-4972-8914-2727614b0e7c-logs\") pod \"barbican-keystone-listener-7bddb697b-t7597\" (UID: \"d9940507-c903-4972-8914-2727614b0e7c\") " pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.638969 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e90d3a14-2789-4684-998e-c392eddebbb6-logs\") pod \"barbican-worker-79d6c6fc67-tshsj\" (UID: \"e90d3a14-2789-4684-998e-c392eddebbb6\") " pod="openstack/barbican-worker-79d6c6fc67-tshsj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.645190 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9940507-c903-4972-8914-2727614b0e7c-config-data\") pod \"barbican-keystone-listener-7bddb697b-t7597\" (UID: \"d9940507-c903-4972-8914-2727614b0e7c\") " pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.647177 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9940507-c903-4972-8914-2727614b0e7c-config-data-custom\") pod \"barbican-keystone-listener-7bddb697b-t7597\" (UID: \"d9940507-c903-4972-8914-2727614b0e7c\") " pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.647235 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e90d3a14-2789-4684-998e-c392eddebbb6-combined-ca-bundle\") pod \"barbican-worker-79d6c6fc67-tshsj\" (UID: \"e90d3a14-2789-4684-998e-c392eddebbb6\") " pod="openstack/barbican-worker-79d6c6fc67-tshsj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.651746 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e90d3a14-2789-4684-998e-c392eddebbb6-config-data\") pod \"barbican-worker-79d6c6fc67-tshsj\" (UID: \"e90d3a14-2789-4684-998e-c392eddebbb6\") " pod="openstack/barbican-worker-79d6c6fc67-tshsj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.655994 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e90d3a14-2789-4684-998e-c392eddebbb6-config-data-custom\") pod \"barbican-worker-79d6c6fc67-tshsj\" (UID: \"e90d3a14-2789-4684-998e-c392eddebbb6\") " 
pod="openstack/barbican-worker-79d6c6fc67-tshsj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.657724 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-899lr\" (UniqueName: \"kubernetes.io/projected/d9940507-c903-4972-8914-2727614b0e7c-kube-api-access-899lr\") pod \"barbican-keystone-listener-7bddb697b-t7597\" (UID: \"d9940507-c903-4972-8914-2727614b0e7c\") " pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.658882 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ndbn\" (UniqueName: \"kubernetes.io/projected/e90d3a14-2789-4684-998e-c392eddebbb6-kube-api-access-4ndbn\") pod \"barbican-worker-79d6c6fc67-tshsj\" (UID: \"e90d3a14-2789-4684-998e-c392eddebbb6\") " pod="openstack/barbican-worker-79d6c6fc67-tshsj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.680442 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9940507-c903-4972-8914-2727614b0e7c-combined-ca-bundle\") pod \"barbican-keystone-listener-7bddb697b-t7597\" (UID: \"d9940507-c903-4972-8914-2727614b0e7c\") " pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.696789 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6c885cf86d-7mdwj"] Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.698122 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.704333 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.718701 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6c885cf86d-7mdwj"] Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.738058 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-dns-svc\") pod \"dnsmasq-dns-c5b46b9d5-5v5cj\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.738124 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-config\") pod \"dnsmasq-dns-c5b46b9d5-5v5cj\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.738716 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqldn\" (UniqueName: \"kubernetes.io/projected/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-kube-api-access-gqldn\") pod \"dnsmasq-dns-c5b46b9d5-5v5cj\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.738871 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-ovsdbserver-sb\") pod \"dnsmasq-dns-c5b46b9d5-5v5cj\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 
Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.738989 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-dns-svc\") pod \"dnsmasq-dns-c5b46b9d5-5v5cj\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj"
Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.739102 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-config\") pod \"dnsmasq-dns-c5b46b9d5-5v5cj\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj"
Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.739525 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-ovsdbserver-nb\") pod \"dnsmasq-dns-c5b46b9d5-5v5cj\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj"
Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.739861 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-ovsdbserver-sb\") pod \"dnsmasq-dns-c5b46b9d5-5v5cj\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj"
Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.764358 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqldn\" (UniqueName: \"kubernetes.io/projected/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-kube-api-access-gqldn\") pod \"dnsmasq-dns-c5b46b9d5-5v5cj\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj"
Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.816946 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-79d6c6fc67-tshsj"
Need to start a new one" pod="openstack/barbican-worker-79d6c6fc67-tshsj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.842779 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-config-data-custom\") pod \"barbican-api-6c885cf86d-7mdwj\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.843003 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e06055a5-b160-46cc-bd61-a0ea4c766a1c-logs\") pod \"barbican-api-6c885cf86d-7mdwj\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.843070 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-combined-ca-bundle\") pod \"barbican-api-6c885cf86d-7mdwj\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.843248 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-config-data\") pod \"barbican-api-6c885cf86d-7mdwj\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.843313 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghljt\" (UniqueName: \"kubernetes.io/projected/e06055a5-b160-46cc-bd61-a0ea4c766a1c-kube-api-access-ghljt\") pod \"barbican-api-6c885cf86d-7mdwj\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.869643 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7bddb697b-t7597" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.920061 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.944814 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-config-data-custom\") pod \"barbican-api-6c885cf86d-7mdwj\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.944905 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e06055a5-b160-46cc-bd61-a0ea4c766a1c-logs\") pod \"barbican-api-6c885cf86d-7mdwj\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.944930 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-combined-ca-bundle\") pod \"barbican-api-6c885cf86d-7mdwj\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.944978 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-config-data\") pod \"barbican-api-6c885cf86d-7mdwj\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.944993 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghljt\" (UniqueName: \"kubernetes.io/projected/e06055a5-b160-46cc-bd61-a0ea4c766a1c-kube-api-access-ghljt\") pod \"barbican-api-6c885cf86d-7mdwj\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.945962 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e06055a5-b160-46cc-bd61-a0ea4c766a1c-logs\") pod \"barbican-api-6c885cf86d-7mdwj\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.954019 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-config-data\") pod \"barbican-api-6c885cf86d-7mdwj\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.959916 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-combined-ca-bundle\") pod \"barbican-api-6c885cf86d-7mdwj\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:45 crc kubenswrapper[5014]: I1006 23:02:45.960095 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-config-data-custom\") pod \"barbican-api-6c885cf86d-7mdwj\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:45 crc 
kubenswrapper[5014]: I1006 23:02:45.981974 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghljt\" (UniqueName: \"kubernetes.io/projected/e06055a5-b160-46cc-bd61-a0ea4c766a1c-kube-api-access-ghljt\") pod \"barbican-api-6c885cf86d-7mdwj\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:46 crc kubenswrapper[5014]: I1006 23:02:46.034695 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:46 crc kubenswrapper[5014]: I1006 23:02:46.345463 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-79d6c6fc67-tshsj"] Oct 06 23:02:46 crc kubenswrapper[5014]: I1006 23:02:46.432393 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7bddb697b-t7597"] Oct 06 23:02:46 crc kubenswrapper[5014]: I1006 23:02:46.484358 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 23:02:46 crc kubenswrapper[5014]: E1006 23:02:46.486725 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:02:46 crc kubenswrapper[5014]: I1006 23:02:46.588893 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6c885cf86d-7mdwj"] Oct 06 23:02:46 crc kubenswrapper[5014]: I1006 23:02:46.598794 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c5b46b9d5-5v5cj"] Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.198710 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7bddb697b-t7597" event={"ID":"d9940507-c903-4972-8914-2727614b0e7c","Type":"ContainerStarted","Data":"3cd71b0b963095e72b71e1bede3f766654ed56b56d80adf656a070bdd1c97c0b"} Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.199177 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7bddb697b-t7597" event={"ID":"d9940507-c903-4972-8914-2727614b0e7c","Type":"ContainerStarted","Data":"d5435b856b553a28906ad0d98b11c91463623a70f72f6a5f7ca13be7e9967587"} Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.199192 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7bddb697b-t7597" event={"ID":"d9940507-c903-4972-8914-2727614b0e7c","Type":"ContainerStarted","Data":"391e6ae44654170a208f1095a484c788a14b49085fb5e89452f009db00520796"} Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.200585 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6c885cf86d-7mdwj" event={"ID":"e06055a5-b160-46cc-bd61-a0ea4c766a1c","Type":"ContainerStarted","Data":"5d812cb832c50f2f08e16ae1851df92680c865e5a6d36d5b74f9ff4e3ddb3ac5"} Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.200661 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6c885cf86d-7mdwj" event={"ID":"e06055a5-b160-46cc-bd61-a0ea4c766a1c","Type":"ContainerStarted","Data":"104e9e0153de02ac3f695a9c10290943a45f3e8879ec7f9d4e266e078ff569f2"} Oct 06 23:02:47 crc 
kubenswrapper[5014]: I1006 23:02:47.200676 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.200686 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6c885cf86d-7mdwj" event={"ID":"e06055a5-b160-46cc-bd61-a0ea4c766a1c","Type":"ContainerStarted","Data":"b83cc864bccb7f17f3680df6042440837f55389a7448e0c09a75776692ad3ae1"} Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.200747 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.202343 5014 generic.go:334] "Generic (PLEG): container finished" podID="b05d4daf-38d1-4aa6-a4b3-a623bad4543f" containerID="cd0cc088dd0b537b2125fb8069d7f0ebf10ced483eb09460b1652461e763fa30" exitCode=0 Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.202386 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" event={"ID":"b05d4daf-38d1-4aa6-a4b3-a623bad4543f","Type":"ContainerDied","Data":"cd0cc088dd0b537b2125fb8069d7f0ebf10ced483eb09460b1652461e763fa30"} Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.202410 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" event={"ID":"b05d4daf-38d1-4aa6-a4b3-a623bad4543f","Type":"ContainerStarted","Data":"3d0f5a7d6f1ee7e5856637896cb36b9d4bda9fa8778a075e55f9351409a74589"} Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.209896 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-79d6c6fc67-tshsj" event={"ID":"e90d3a14-2789-4684-998e-c392eddebbb6","Type":"ContainerStarted","Data":"0822234497a5604599daad639876c05f926efa1cfab734bb5708cf188cd33150"} Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.209941 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-79d6c6fc67-tshsj" event={"ID":"e90d3a14-2789-4684-998e-c392eddebbb6","Type":"ContainerStarted","Data":"d803c0e31c4b7dee29510b3c2def08653ae3f35685f02968fc4b44b86ddda7cb"} Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.209951 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-79d6c6fc67-tshsj" event={"ID":"e90d3a14-2789-4684-998e-c392eddebbb6","Type":"ContainerStarted","Data":"bbbf571f190d179579755fe3fbc7da370d3bd391796bf2143b1b6e4c791ba3bf"} Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.222855 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-7bddb697b-t7597" podStartSLOduration=2.22283841 podStartE2EDuration="2.22283841s" podCreationTimestamp="2025-10-06 23:02:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:02:47.219899659 +0000 UTC m=+5512.512936393" watchObservedRunningTime="2025-10-06 23:02:47.22283841 +0000 UTC m=+5512.515875144" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.288096 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-79d6c6fc67-tshsj" podStartSLOduration=2.288073806 podStartE2EDuration="2.288073806s" podCreationTimestamp="2025-10-06 23:02:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:02:47.278173806 +0000 UTC 
m=+5512.571210550" watchObservedRunningTime="2025-10-06 23:02:47.288073806 +0000 UTC m=+5512.581110540" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.316638 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6c885cf86d-7mdwj" podStartSLOduration=2.316591726 podStartE2EDuration="2.316591726s" podCreationTimestamp="2025-10-06 23:02:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:02:47.308868045 +0000 UTC m=+5512.601904779" watchObservedRunningTime="2025-10-06 23:02:47.316591726 +0000 UTC m=+5512.609628470" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.735421 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6c879f4fd6-dj9kj"] Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.737904 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.739477 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.740818 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.746379 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6c879f4fd6-dj9kj"] Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.890988 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfssg\" (UniqueName: \"kubernetes.io/projected/f0b1051a-545a-4bcb-940e-962ee142eda6-kube-api-access-sfssg\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.891245 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f0b1051a-545a-4bcb-940e-962ee142eda6-config-data-custom\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.891326 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0b1051a-545a-4bcb-940e-962ee142eda6-combined-ca-bundle\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.891439 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0b1051a-545a-4bcb-940e-962ee142eda6-config-data\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.891566 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0b1051a-545a-4bcb-940e-962ee142eda6-internal-tls-certs\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " 
pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.891683 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0b1051a-545a-4bcb-940e-962ee142eda6-public-tls-certs\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.891824 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0b1051a-545a-4bcb-940e-962ee142eda6-logs\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.993257 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfssg\" (UniqueName: \"kubernetes.io/projected/f0b1051a-545a-4bcb-940e-962ee142eda6-kube-api-access-sfssg\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.993307 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f0b1051a-545a-4bcb-940e-962ee142eda6-config-data-custom\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.993330 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0b1051a-545a-4bcb-940e-962ee142eda6-combined-ca-bundle\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.993344 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0b1051a-545a-4bcb-940e-962ee142eda6-config-data\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.993368 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0b1051a-545a-4bcb-940e-962ee142eda6-internal-tls-certs\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.993394 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0b1051a-545a-4bcb-940e-962ee142eda6-public-tls-certs\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.993472 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0b1051a-545a-4bcb-940e-962ee142eda6-logs\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " 
pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.994410 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0b1051a-545a-4bcb-940e-962ee142eda6-logs\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:47 crc kubenswrapper[5014]: I1006 23:02:47.999240 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0b1051a-545a-4bcb-940e-962ee142eda6-combined-ca-bundle\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:48 crc kubenswrapper[5014]: I1006 23:02:48.000056 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0b1051a-545a-4bcb-940e-962ee142eda6-public-tls-certs\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:48 crc kubenswrapper[5014]: I1006 23:02:48.007980 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0b1051a-545a-4bcb-940e-962ee142eda6-config-data\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:48 crc kubenswrapper[5014]: I1006 23:02:48.012371 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f0b1051a-545a-4bcb-940e-962ee142eda6-config-data-custom\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:48 crc kubenswrapper[5014]: I1006 23:02:48.013281 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0b1051a-545a-4bcb-940e-962ee142eda6-internal-tls-certs\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:48 crc kubenswrapper[5014]: I1006 23:02:48.020024 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfssg\" (UniqueName: \"kubernetes.io/projected/f0b1051a-545a-4bcb-940e-962ee142eda6-kube-api-access-sfssg\") pod \"barbican-api-6c879f4fd6-dj9kj\" (UID: \"f0b1051a-545a-4bcb-940e-962ee142eda6\") " pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:48 crc kubenswrapper[5014]: I1006 23:02:48.064668 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:48 crc kubenswrapper[5014]: I1006 23:02:48.219802 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" event={"ID":"b05d4daf-38d1-4aa6-a4b3-a623bad4543f","Type":"ContainerStarted","Data":"0e89ed657f6058e866971770d6b296c9502177eeb9d334b7d5a5c36a3d587476"} Oct 06 23:02:48 crc kubenswrapper[5014]: I1006 23:02:48.245258 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" podStartSLOduration=3.245233682 podStartE2EDuration="3.245233682s" podCreationTimestamp="2025-10-06 23:02:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:02:48.239774022 +0000 UTC m=+5513.532810766" watchObservedRunningTime="2025-10-06 23:02:48.245233682 +0000 UTC m=+5513.538270416" Oct 06 23:02:48 crc kubenswrapper[5014]: I1006 23:02:48.524785 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6c879f4fd6-dj9kj"] Oct 06 23:02:49 crc kubenswrapper[5014]: I1006 23:02:49.231730 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6c879f4fd6-dj9kj" event={"ID":"f0b1051a-545a-4bcb-940e-962ee142eda6","Type":"ContainerStarted","Data":"aaed20a1e5b66d0ff214801283f6d91bbe1fdf00603eeb88bfd1101fb004f774"} Oct 06 23:02:49 crc kubenswrapper[5014]: I1006 23:02:49.232116 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" Oct 06 23:02:49 crc kubenswrapper[5014]: I1006 23:02:49.232499 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6c879f4fd6-dj9kj" event={"ID":"f0b1051a-545a-4bcb-940e-962ee142eda6","Type":"ContainerStarted","Data":"85f5ddce1ff696f87ac8ed630d69f61db39caed43c9ffb31b2ecc2de6204ec5e"} Oct 06 23:02:49 crc kubenswrapper[5014]: I1006 23:02:49.232516 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6c879f4fd6-dj9kj" event={"ID":"f0b1051a-545a-4bcb-940e-962ee142eda6","Type":"ContainerStarted","Data":"897e425f9d4805c430de573e6e55acde3732d3950b8c699d01f7779f31ab3b28"} Oct 06 23:02:49 crc kubenswrapper[5014]: I1006 23:02:49.261975 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6c879f4fd6-dj9kj" podStartSLOduration=2.261953327 podStartE2EDuration="2.261953327s" podCreationTimestamp="2025-10-06 23:02:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:02:49.253923287 +0000 UTC m=+5514.546960031" watchObservedRunningTime="2025-10-06 23:02:49.261953327 +0000 UTC m=+5514.554990071" Oct 06 23:02:50 crc kubenswrapper[5014]: I1006 23:02:50.242911 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:50 crc kubenswrapper[5014]: I1006 23:02:50.244056 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:52 crc kubenswrapper[5014]: I1006 23:02:52.444315 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:53 crc kubenswrapper[5014]: I1006 23:02:53.857527 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:54 
crc kubenswrapper[5014]: I1006 23:02:54.494758 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:54 crc kubenswrapper[5014]: I1006 23:02:54.500857 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6c879f4fd6-dj9kj" Oct 06 23:02:54 crc kubenswrapper[5014]: I1006 23:02:54.611056 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6c885cf86d-7mdwj"] Oct 06 23:02:54 crc kubenswrapper[5014]: I1006 23:02:54.614943 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6c885cf86d-7mdwj" podUID="e06055a5-b160-46cc-bd61-a0ea4c766a1c" containerName="barbican-api-log" containerID="cri-o://104e9e0153de02ac3f695a9c10290943a45f3e8879ec7f9d4e266e078ff569f2" gracePeriod=30 Oct 06 23:02:54 crc kubenswrapper[5014]: I1006 23:02:54.617425 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6c885cf86d-7mdwj" podUID="e06055a5-b160-46cc-bd61-a0ea4c766a1c" containerName="barbican-api" containerID="cri-o://5d812cb832c50f2f08e16ae1851df92680c865e5a6d36d5b74f9ff4e3ddb3ac5" gracePeriod=30 Oct 06 23:02:55 crc kubenswrapper[5014]: I1006 23:02:55.299981 5014 generic.go:334] "Generic (PLEG): container finished" podID="e06055a5-b160-46cc-bd61-a0ea4c766a1c" containerID="104e9e0153de02ac3f695a9c10290943a45f3e8879ec7f9d4e266e078ff569f2" exitCode=143 Oct 06 23:02:55 crc kubenswrapper[5014]: I1006 23:02:55.301335 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6c885cf86d-7mdwj" event={"ID":"e06055a5-b160-46cc-bd61-a0ea4c766a1c","Type":"ContainerDied","Data":"104e9e0153de02ac3f695a9c10290943a45f3e8879ec7f9d4e266e078ff569f2"} Oct 06 23:02:55 crc kubenswrapper[5014]: I1006 23:02:55.921900 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.019758 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c7c55f8b9-fkwpf"] Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.024011 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" podUID="ff5b5727-cb7f-44fb-823f-a7486367ba78" containerName="dnsmasq-dns" containerID="cri-o://554d4a657625ffa80048095ff4aa0026008d843965264a98ea23801fd8f374f8" gracePeriod=10 Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.311992 5014 generic.go:334] "Generic (PLEG): container finished" podID="ff5b5727-cb7f-44fb-823f-a7486367ba78" containerID="554d4a657625ffa80048095ff4aa0026008d843965264a98ea23801fd8f374f8" exitCode=0 Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.312125 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" event={"ID":"ff5b5727-cb7f-44fb-823f-a7486367ba78","Type":"ContainerDied","Data":"554d4a657625ffa80048095ff4aa0026008d843965264a98ea23801fd8f374f8"} Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.482647 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.607313 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkcgk\" (UniqueName: \"kubernetes.io/projected/ff5b5727-cb7f-44fb-823f-a7486367ba78-kube-api-access-xkcgk\") pod \"ff5b5727-cb7f-44fb-823f-a7486367ba78\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.607428 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-dns-svc\") pod \"ff5b5727-cb7f-44fb-823f-a7486367ba78\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.607471 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-config\") pod \"ff5b5727-cb7f-44fb-823f-a7486367ba78\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.607580 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-ovsdbserver-nb\") pod \"ff5b5727-cb7f-44fb-823f-a7486367ba78\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.607652 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-ovsdbserver-sb\") pod \"ff5b5727-cb7f-44fb-823f-a7486367ba78\" (UID: \"ff5b5727-cb7f-44fb-823f-a7486367ba78\") " Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.614377 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff5b5727-cb7f-44fb-823f-a7486367ba78-kube-api-access-xkcgk" (OuterVolumeSpecName: "kube-api-access-xkcgk") pod "ff5b5727-cb7f-44fb-823f-a7486367ba78" (UID: "ff5b5727-cb7f-44fb-823f-a7486367ba78"). InnerVolumeSpecName "kube-api-access-xkcgk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.654418 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ff5b5727-cb7f-44fb-823f-a7486367ba78" (UID: "ff5b5727-cb7f-44fb-823f-a7486367ba78"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.668426 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ff5b5727-cb7f-44fb-823f-a7486367ba78" (UID: "ff5b5727-cb7f-44fb-823f-a7486367ba78"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.682541 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ff5b5727-cb7f-44fb-823f-a7486367ba78" (UID: "ff5b5727-cb7f-44fb-823f-a7486367ba78"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.692912 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-config" (OuterVolumeSpecName: "config") pod "ff5b5727-cb7f-44fb-823f-a7486367ba78" (UID: "ff5b5727-cb7f-44fb-823f-a7486367ba78"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.709255 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.709287 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-config\") on node \"crc\" DevicePath \"\"" Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.709298 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.709311 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff5b5727-cb7f-44fb-823f-a7486367ba78-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 06 23:02:56 crc kubenswrapper[5014]: I1006 23:02:56.709323 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkcgk\" (UniqueName: \"kubernetes.io/projected/ff5b5727-cb7f-44fb-823f-a7486367ba78-kube-api-access-xkcgk\") on node \"crc\" DevicePath \"\"" Oct 06 23:02:57 crc kubenswrapper[5014]: I1006 23:02:57.326488 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" event={"ID":"ff5b5727-cb7f-44fb-823f-a7486367ba78","Type":"ContainerDied","Data":"2f46f825e1c527d17c116dd0f4c7af7a8be91f4e3139c38c1e6ccb1fac552573"} Oct 06 23:02:57 crc kubenswrapper[5014]: I1006 23:02:57.326540 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c7c55f8b9-fkwpf" Oct 06 23:02:57 crc kubenswrapper[5014]: I1006 23:02:57.326569 5014 scope.go:117] "RemoveContainer" containerID="554d4a657625ffa80048095ff4aa0026008d843965264a98ea23801fd8f374f8" Oct 06 23:02:57 crc kubenswrapper[5014]: I1006 23:02:57.367663 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c7c55f8b9-fkwpf"] Oct 06 23:02:57 crc kubenswrapper[5014]: I1006 23:02:57.375404 5014 scope.go:117] "RemoveContainer" containerID="1ef5aa10d5b18f70f3b592b6065344aa274ccc67ea7427ccf43878cba412dd4d" Oct 06 23:02:57 crc kubenswrapper[5014]: I1006 23:02:57.378288 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6c7c55f8b9-fkwpf"] Oct 06 23:02:57 crc kubenswrapper[5014]: I1006 23:02:57.484851 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 23:02:57 crc kubenswrapper[5014]: E1006 23:02:57.485400 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:02:57 crc kubenswrapper[5014]: I1006 23:02:57.503884 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff5b5727-cb7f-44fb-823f-a7486367ba78" path="/var/lib/kubelet/pods/ff5b5727-cb7f-44fb-823f-a7486367ba78/volumes" Oct 06 23:02:57 crc kubenswrapper[5014]: I1006 23:02:57.754368 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-6c885cf86d-7mdwj" podUID="e06055a5-b160-46cc-bd61-a0ea4c766a1c" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.1.38:9311/healthcheck\": read tcp 10.217.0.2:40672->10.217.1.38:9311: read: connection reset by peer" Oct 06 23:02:57 crc kubenswrapper[5014]: I1006 23:02:57.754405 5014 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-6c885cf86d-7mdwj" podUID="e06055a5-b160-46cc-bd61-a0ea4c766a1c" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.1.38:9311/healthcheck\": read tcp 10.217.0.2:40682->10.217.1.38:9311: read: connection reset by peer" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.222551 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.336090 5014 generic.go:334] "Generic (PLEG): container finished" podID="e06055a5-b160-46cc-bd61-a0ea4c766a1c" containerID="5d812cb832c50f2f08e16ae1851df92680c865e5a6d36d5b74f9ff4e3ddb3ac5" exitCode=0 Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.336168 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6c885cf86d-7mdwj" event={"ID":"e06055a5-b160-46cc-bd61-a0ea4c766a1c","Type":"ContainerDied","Data":"5d812cb832c50f2f08e16ae1851df92680c865e5a6d36d5b74f9ff4e3ddb3ac5"} Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.336204 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6c885cf86d-7mdwj" event={"ID":"e06055a5-b160-46cc-bd61-a0ea4c766a1c","Type":"ContainerDied","Data":"b83cc864bccb7f17f3680df6042440837f55389a7448e0c09a75776692ad3ae1"} Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.336173 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6c885cf86d-7mdwj" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.336243 5014 scope.go:117] "RemoveContainer" containerID="5d812cb832c50f2f08e16ae1851df92680c865e5a6d36d5b74f9ff4e3ddb3ac5" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.344461 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-config-data-custom\") pod \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.344574 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghljt\" (UniqueName: \"kubernetes.io/projected/e06055a5-b160-46cc-bd61-a0ea4c766a1c-kube-api-access-ghljt\") pod \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.344748 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e06055a5-b160-46cc-bd61-a0ea4c766a1c-logs\") pod \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.345511 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e06055a5-b160-46cc-bd61-a0ea4c766a1c-logs" (OuterVolumeSpecName: "logs") pod "e06055a5-b160-46cc-bd61-a0ea4c766a1c" (UID: "e06055a5-b160-46cc-bd61-a0ea4c766a1c"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.345599 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-config-data\") pod \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.346410 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-combined-ca-bundle\") pod \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\" (UID: \"e06055a5-b160-46cc-bd61-a0ea4c766a1c\") " Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.347069 5014 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e06055a5-b160-46cc-bd61-a0ea4c766a1c-logs\") on node \"crc\" DevicePath \"\"" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.353876 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "e06055a5-b160-46cc-bd61-a0ea4c766a1c" (UID: "e06055a5-b160-46cc-bd61-a0ea4c766a1c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.356460 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e06055a5-b160-46cc-bd61-a0ea4c766a1c-kube-api-access-ghljt" (OuterVolumeSpecName: "kube-api-access-ghljt") pod "e06055a5-b160-46cc-bd61-a0ea4c766a1c" (UID: "e06055a5-b160-46cc-bd61-a0ea4c766a1c"). InnerVolumeSpecName "kube-api-access-ghljt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.362142 5014 scope.go:117] "RemoveContainer" containerID="104e9e0153de02ac3f695a9c10290943a45f3e8879ec7f9d4e266e078ff569f2" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.376287 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e06055a5-b160-46cc-bd61-a0ea4c766a1c" (UID: "e06055a5-b160-46cc-bd61-a0ea4c766a1c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.401427 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-config-data" (OuterVolumeSpecName: "config-data") pod "e06055a5-b160-46cc-bd61-a0ea4c766a1c" (UID: "e06055a5-b160-46cc-bd61-a0ea4c766a1c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.448451 5014 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.448481 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghljt\" (UniqueName: \"kubernetes.io/projected/e06055a5-b160-46cc-bd61-a0ea4c766a1c-kube-api-access-ghljt\") on node \"crc\" DevicePath \"\"" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.448496 5014 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-config-data\") on node \"crc\" DevicePath \"\"" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.448508 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e06055a5-b160-46cc-bd61-a0ea4c766a1c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.463630 5014 scope.go:117] "RemoveContainer" containerID="5d812cb832c50f2f08e16ae1851df92680c865e5a6d36d5b74f9ff4e3ddb3ac5" Oct 06 23:02:58 crc kubenswrapper[5014]: E1006 23:02:58.463891 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d812cb832c50f2f08e16ae1851df92680c865e5a6d36d5b74f9ff4e3ddb3ac5\": container with ID starting with 5d812cb832c50f2f08e16ae1851df92680c865e5a6d36d5b74f9ff4e3ddb3ac5 not found: ID does not exist" containerID="5d812cb832c50f2f08e16ae1851df92680c865e5a6d36d5b74f9ff4e3ddb3ac5" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.463938 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d812cb832c50f2f08e16ae1851df92680c865e5a6d36d5b74f9ff4e3ddb3ac5"} err="failed to get container status \"5d812cb832c50f2f08e16ae1851df92680c865e5a6d36d5b74f9ff4e3ddb3ac5\": rpc error: code = NotFound desc = could not find container \"5d812cb832c50f2f08e16ae1851df92680c865e5a6d36d5b74f9ff4e3ddb3ac5\": container with ID starting with 5d812cb832c50f2f08e16ae1851df92680c865e5a6d36d5b74f9ff4e3ddb3ac5 not found: ID does not exist" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.463965 5014 scope.go:117] "RemoveContainer" containerID="104e9e0153de02ac3f695a9c10290943a45f3e8879ec7f9d4e266e078ff569f2" Oct 06 23:02:58 crc kubenswrapper[5014]: E1006 23:02:58.464286 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"104e9e0153de02ac3f695a9c10290943a45f3e8879ec7f9d4e266e078ff569f2\": container with ID starting with 104e9e0153de02ac3f695a9c10290943a45f3e8879ec7f9d4e266e078ff569f2 not found: ID does not exist" containerID="104e9e0153de02ac3f695a9c10290943a45f3e8879ec7f9d4e266e078ff569f2" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.464333 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"104e9e0153de02ac3f695a9c10290943a45f3e8879ec7f9d4e266e078ff569f2"} err="failed to get container status \"104e9e0153de02ac3f695a9c10290943a45f3e8879ec7f9d4e266e078ff569f2\": rpc error: code = NotFound desc = could not find container \"104e9e0153de02ac3f695a9c10290943a45f3e8879ec7f9d4e266e078ff569f2\": container with ID starting with 
104e9e0153de02ac3f695a9c10290943a45f3e8879ec7f9d4e266e078ff569f2 not found: ID does not exist" Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.698054 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6c885cf86d-7mdwj"] Oct 06 23:02:58 crc kubenswrapper[5014]: I1006 23:02:58.708254 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-6c885cf86d-7mdwj"] Oct 06 23:02:59 crc kubenswrapper[5014]: I1006 23:02:59.497536 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e06055a5-b160-46cc-bd61-a0ea4c766a1c" path="/var/lib/kubelet/pods/e06055a5-b160-46cc-bd61-a0ea4c766a1c/volumes" Oct 06 23:03:01 crc kubenswrapper[5014]: I1006 23:03:01.289239 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-t6vdl"] Oct 06 23:03:01 crc kubenswrapper[5014]: E1006 23:03:01.289851 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff5b5727-cb7f-44fb-823f-a7486367ba78" containerName="dnsmasq-dns" Oct 06 23:03:01 crc kubenswrapper[5014]: I1006 23:03:01.289865 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff5b5727-cb7f-44fb-823f-a7486367ba78" containerName="dnsmasq-dns" Oct 06 23:03:01 crc kubenswrapper[5014]: E1006 23:03:01.289876 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e06055a5-b160-46cc-bd61-a0ea4c766a1c" containerName="barbican-api-log" Oct 06 23:03:01 crc kubenswrapper[5014]: I1006 23:03:01.289882 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="e06055a5-b160-46cc-bd61-a0ea4c766a1c" containerName="barbican-api-log" Oct 06 23:03:01 crc kubenswrapper[5014]: E1006 23:03:01.289901 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e06055a5-b160-46cc-bd61-a0ea4c766a1c" containerName="barbican-api" Oct 06 23:03:01 crc kubenswrapper[5014]: I1006 23:03:01.289907 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="e06055a5-b160-46cc-bd61-a0ea4c766a1c" containerName="barbican-api" Oct 06 23:03:01 crc kubenswrapper[5014]: E1006 23:03:01.289916 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff5b5727-cb7f-44fb-823f-a7486367ba78" containerName="init" Oct 06 23:03:01 crc kubenswrapper[5014]: I1006 23:03:01.289922 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff5b5727-cb7f-44fb-823f-a7486367ba78" containerName="init" Oct 06 23:03:01 crc kubenswrapper[5014]: I1006 23:03:01.290072 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff5b5727-cb7f-44fb-823f-a7486367ba78" containerName="dnsmasq-dns" Oct 06 23:03:01 crc kubenswrapper[5014]: I1006 23:03:01.290084 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="e06055a5-b160-46cc-bd61-a0ea4c766a1c" containerName="barbican-api-log" Oct 06 23:03:01 crc kubenswrapper[5014]: I1006 23:03:01.290093 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="e06055a5-b160-46cc-bd61-a0ea4c766a1c" containerName="barbican-api" Oct 06 23:03:01 crc kubenswrapper[5014]: I1006 23:03:01.290592 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-t6vdl" Oct 06 23:03:01 crc kubenswrapper[5014]: I1006 23:03:01.305466 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-t6vdl"] Oct 06 23:03:01 crc kubenswrapper[5014]: I1006 23:03:01.406398 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rbg2\" (UniqueName: \"kubernetes.io/projected/b1c8aaf9-b44d-41da-9beb-ffaf1d68b938-kube-api-access-2rbg2\") pod \"neutron-db-create-t6vdl\" (UID: \"b1c8aaf9-b44d-41da-9beb-ffaf1d68b938\") " pod="openstack/neutron-db-create-t6vdl" Oct 06 23:03:01 crc kubenswrapper[5014]: I1006 23:03:01.508462 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rbg2\" (UniqueName: \"kubernetes.io/projected/b1c8aaf9-b44d-41da-9beb-ffaf1d68b938-kube-api-access-2rbg2\") pod \"neutron-db-create-t6vdl\" (UID: \"b1c8aaf9-b44d-41da-9beb-ffaf1d68b938\") " pod="openstack/neutron-db-create-t6vdl" Oct 06 23:03:01 crc kubenswrapper[5014]: I1006 23:03:01.530113 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rbg2\" (UniqueName: \"kubernetes.io/projected/b1c8aaf9-b44d-41da-9beb-ffaf1d68b938-kube-api-access-2rbg2\") pod \"neutron-db-create-t6vdl\" (UID: \"b1c8aaf9-b44d-41da-9beb-ffaf1d68b938\") " pod="openstack/neutron-db-create-t6vdl" Oct 06 23:03:01 crc kubenswrapper[5014]: I1006 23:03:01.611211 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-t6vdl" Oct 06 23:03:02 crc kubenswrapper[5014]: I1006 23:03:02.146869 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-t6vdl"] Oct 06 23:03:02 crc kubenswrapper[5014]: W1006 23:03:02.153830 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1c8aaf9_b44d_41da_9beb_ffaf1d68b938.slice/crio-658dac1414caf89eee7a035e3c022a9f502c1ef7677752d6ea2c482cfa8b47aa WatchSource:0}: Error finding container 658dac1414caf89eee7a035e3c022a9f502c1ef7677752d6ea2c482cfa8b47aa: Status 404 returned error can't find the container with id 658dac1414caf89eee7a035e3c022a9f502c1ef7677752d6ea2c482cfa8b47aa Oct 06 23:03:02 crc kubenswrapper[5014]: I1006 23:03:02.393364 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-t6vdl" event={"ID":"b1c8aaf9-b44d-41da-9beb-ffaf1d68b938","Type":"ContainerStarted","Data":"658dac1414caf89eee7a035e3c022a9f502c1ef7677752d6ea2c482cfa8b47aa"} Oct 06 23:03:03 crc kubenswrapper[5014]: I1006 23:03:03.407850 5014 generic.go:334] "Generic (PLEG): container finished" podID="b1c8aaf9-b44d-41da-9beb-ffaf1d68b938" containerID="62eea1ba3501d668718423b6bf224ad8f36204f3c7a9793614ecbdf35187b411" exitCode=0 Oct 06 23:03:03 crc kubenswrapper[5014]: I1006 23:03:03.407936 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-t6vdl" event={"ID":"b1c8aaf9-b44d-41da-9beb-ffaf1d68b938","Type":"ContainerDied","Data":"62eea1ba3501d668718423b6bf224ad8f36204f3c7a9793614ecbdf35187b411"} Oct 06 23:03:04 crc kubenswrapper[5014]: I1006 23:03:04.836693 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-t6vdl" Oct 06 23:03:04 crc kubenswrapper[5014]: I1006 23:03:04.999289 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rbg2\" (UniqueName: \"kubernetes.io/projected/b1c8aaf9-b44d-41da-9beb-ffaf1d68b938-kube-api-access-2rbg2\") pod \"b1c8aaf9-b44d-41da-9beb-ffaf1d68b938\" (UID: \"b1c8aaf9-b44d-41da-9beb-ffaf1d68b938\") " Oct 06 23:03:05 crc kubenswrapper[5014]: I1006 23:03:05.012171 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1c8aaf9-b44d-41da-9beb-ffaf1d68b938-kube-api-access-2rbg2" (OuterVolumeSpecName: "kube-api-access-2rbg2") pod "b1c8aaf9-b44d-41da-9beb-ffaf1d68b938" (UID: "b1c8aaf9-b44d-41da-9beb-ffaf1d68b938"). InnerVolumeSpecName "kube-api-access-2rbg2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:03:05 crc kubenswrapper[5014]: I1006 23:03:05.102490 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rbg2\" (UniqueName: \"kubernetes.io/projected/b1c8aaf9-b44d-41da-9beb-ffaf1d68b938-kube-api-access-2rbg2\") on node \"crc\" DevicePath \"\"" Oct 06 23:03:05 crc kubenswrapper[5014]: I1006 23:03:05.432673 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-t6vdl" event={"ID":"b1c8aaf9-b44d-41da-9beb-ffaf1d68b938","Type":"ContainerDied","Data":"658dac1414caf89eee7a035e3c022a9f502c1ef7677752d6ea2c482cfa8b47aa"} Oct 06 23:03:05 crc kubenswrapper[5014]: I1006 23:03:05.433190 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="658dac1414caf89eee7a035e3c022a9f502c1ef7677752d6ea2c482cfa8b47aa" Oct 06 23:03:05 crc kubenswrapper[5014]: I1006 23:03:05.432850 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-t6vdl" Oct 06 23:03:10 crc kubenswrapper[5014]: I1006 23:03:10.486071 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 23:03:10 crc kubenswrapper[5014]: E1006 23:03:10.487226 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:03:11 crc kubenswrapper[5014]: I1006 23:03:11.450395 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-0178-account-create-9r9g9"] Oct 06 23:03:11 crc kubenswrapper[5014]: E1006 23:03:11.451048 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1c8aaf9-b44d-41da-9beb-ffaf1d68b938" containerName="mariadb-database-create" Oct 06 23:03:11 crc kubenswrapper[5014]: I1006 23:03:11.451071 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1c8aaf9-b44d-41da-9beb-ffaf1d68b938" containerName="mariadb-database-create" Oct 06 23:03:11 crc kubenswrapper[5014]: I1006 23:03:11.451404 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1c8aaf9-b44d-41da-9beb-ffaf1d68b938" containerName="mariadb-database-create" Oct 06 23:03:11 crc kubenswrapper[5014]: I1006 23:03:11.452472 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-0178-account-create-9r9g9" Oct 06 23:03:11 crc kubenswrapper[5014]: I1006 23:03:11.457299 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Oct 06 23:03:11 crc kubenswrapper[5014]: I1006 23:03:11.462084 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-0178-account-create-9r9g9"] Oct 06 23:03:11 crc kubenswrapper[5014]: I1006 23:03:11.558714 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64zvq\" (UniqueName: \"kubernetes.io/projected/d7abf808-7de9-4072-a359-0b96605a8b33-kube-api-access-64zvq\") pod \"neutron-0178-account-create-9r9g9\" (UID: \"d7abf808-7de9-4072-a359-0b96605a8b33\") " pod="openstack/neutron-0178-account-create-9r9g9" Oct 06 23:03:11 crc kubenswrapper[5014]: I1006 23:03:11.660431 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64zvq\" (UniqueName: \"kubernetes.io/projected/d7abf808-7de9-4072-a359-0b96605a8b33-kube-api-access-64zvq\") pod \"neutron-0178-account-create-9r9g9\" (UID: \"d7abf808-7de9-4072-a359-0b96605a8b33\") " pod="openstack/neutron-0178-account-create-9r9g9" Oct 06 23:03:11 crc kubenswrapper[5014]: I1006 23:03:11.683579 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64zvq\" (UniqueName: \"kubernetes.io/projected/d7abf808-7de9-4072-a359-0b96605a8b33-kube-api-access-64zvq\") pod \"neutron-0178-account-create-9r9g9\" (UID: \"d7abf808-7de9-4072-a359-0b96605a8b33\") " pod="openstack/neutron-0178-account-create-9r9g9" Oct 06 23:03:11 crc kubenswrapper[5014]: I1006 23:03:11.781313 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0178-account-create-9r9g9" Oct 06 23:03:12 crc kubenswrapper[5014]: I1006 23:03:12.182977 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-0178-account-create-9r9g9"] Oct 06 23:03:12 crc kubenswrapper[5014]: W1006 23:03:12.187549 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd7abf808_7de9_4072_a359_0b96605a8b33.slice/crio-029f6a819360d4b1a8d6b398ca3057c08df5d63e591b0d340d14e002633b870b WatchSource:0}: Error finding container 029f6a819360d4b1a8d6b398ca3057c08df5d63e591b0d340d14e002633b870b: Status 404 returned error can't find the container with id 029f6a819360d4b1a8d6b398ca3057c08df5d63e591b0d340d14e002633b870b Oct 06 23:03:12 crc kubenswrapper[5014]: I1006 23:03:12.509995 5014 generic.go:334] "Generic (PLEG): container finished" podID="d7abf808-7de9-4072-a359-0b96605a8b33" containerID="4861594839ac97638e6453e9d9037d2c7f6cee6e44dbd108c9857e303acc03a7" exitCode=0 Oct 06 23:03:12 crc kubenswrapper[5014]: I1006 23:03:12.510122 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0178-account-create-9r9g9" event={"ID":"d7abf808-7de9-4072-a359-0b96605a8b33","Type":"ContainerDied","Data":"4861594839ac97638e6453e9d9037d2c7f6cee6e44dbd108c9857e303acc03a7"} Oct 06 23:03:12 crc kubenswrapper[5014]: I1006 23:03:12.510353 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0178-account-create-9r9g9" event={"ID":"d7abf808-7de9-4072-a359-0b96605a8b33","Type":"ContainerStarted","Data":"029f6a819360d4b1a8d6b398ca3057c08df5d63e591b0d340d14e002633b870b"} Oct 06 23:03:13 crc kubenswrapper[5014]: I1006 23:03:13.931548 5014 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openstack/neutron-0178-account-create-9r9g9" Oct 06 23:03:14 crc kubenswrapper[5014]: I1006 23:03:14.127170 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64zvq\" (UniqueName: \"kubernetes.io/projected/d7abf808-7de9-4072-a359-0b96605a8b33-kube-api-access-64zvq\") pod \"d7abf808-7de9-4072-a359-0b96605a8b33\" (UID: \"d7abf808-7de9-4072-a359-0b96605a8b33\") " Oct 06 23:03:14 crc kubenswrapper[5014]: I1006 23:03:14.137928 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7abf808-7de9-4072-a359-0b96605a8b33-kube-api-access-64zvq" (OuterVolumeSpecName: "kube-api-access-64zvq") pod "d7abf808-7de9-4072-a359-0b96605a8b33" (UID: "d7abf808-7de9-4072-a359-0b96605a8b33"). InnerVolumeSpecName "kube-api-access-64zvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:03:14 crc kubenswrapper[5014]: I1006 23:03:14.229939 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64zvq\" (UniqueName: \"kubernetes.io/projected/d7abf808-7de9-4072-a359-0b96605a8b33-kube-api-access-64zvq\") on node \"crc\" DevicePath \"\"" Oct 06 23:03:14 crc kubenswrapper[5014]: I1006 23:03:14.538167 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0178-account-create-9r9g9" event={"ID":"d7abf808-7de9-4072-a359-0b96605a8b33","Type":"ContainerDied","Data":"029f6a819360d4b1a8d6b398ca3057c08df5d63e591b0d340d14e002633b870b"} Oct 06 23:03:14 crc kubenswrapper[5014]: I1006 23:03:14.538224 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="029f6a819360d4b1a8d6b398ca3057c08df5d63e591b0d340d14e002633b870b" Oct 06 23:03:14 crc kubenswrapper[5014]: I1006 23:03:14.538286 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0178-account-create-9r9g9" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.638525 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-c6856"] Oct 06 23:03:16 crc kubenswrapper[5014]: E1006 23:03:16.639930 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7abf808-7de9-4072-a359-0b96605a8b33" containerName="mariadb-account-create" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.639950 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7abf808-7de9-4072-a359-0b96605a8b33" containerName="mariadb-account-create" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.640143 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7abf808-7de9-4072-a359-0b96605a8b33" containerName="mariadb-account-create" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.640904 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-c6856" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.644026 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.644346 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-j44h6" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.645691 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.650033 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-c6856"] Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.677820 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/764bd6b3-4473-4221-8d5c-9ed898cb677b-combined-ca-bundle\") pod \"neutron-db-sync-c6856\" (UID: \"764bd6b3-4473-4221-8d5c-9ed898cb677b\") " pod="openstack/neutron-db-sync-c6856" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.677885 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/764bd6b3-4473-4221-8d5c-9ed898cb677b-config\") pod \"neutron-db-sync-c6856\" (UID: \"764bd6b3-4473-4221-8d5c-9ed898cb677b\") " pod="openstack/neutron-db-sync-c6856" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.677959 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pq82\" (UniqueName: \"kubernetes.io/projected/764bd6b3-4473-4221-8d5c-9ed898cb677b-kube-api-access-8pq82\") pod \"neutron-db-sync-c6856\" (UID: \"764bd6b3-4473-4221-8d5c-9ed898cb677b\") " pod="openstack/neutron-db-sync-c6856" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.779686 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/764bd6b3-4473-4221-8d5c-9ed898cb677b-config\") pod \"neutron-db-sync-c6856\" (UID: \"764bd6b3-4473-4221-8d5c-9ed898cb677b\") " pod="openstack/neutron-db-sync-c6856" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.779786 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pq82\" (UniqueName: \"kubernetes.io/projected/764bd6b3-4473-4221-8d5c-9ed898cb677b-kube-api-access-8pq82\") pod \"neutron-db-sync-c6856\" (UID: \"764bd6b3-4473-4221-8d5c-9ed898cb677b\") " pod="openstack/neutron-db-sync-c6856" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.780002 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/764bd6b3-4473-4221-8d5c-9ed898cb677b-combined-ca-bundle\") pod \"neutron-db-sync-c6856\" (UID: \"764bd6b3-4473-4221-8d5c-9ed898cb677b\") " pod="openstack/neutron-db-sync-c6856" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.786344 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/764bd6b3-4473-4221-8d5c-9ed898cb677b-config\") pod \"neutron-db-sync-c6856\" (UID: \"764bd6b3-4473-4221-8d5c-9ed898cb677b\") " pod="openstack/neutron-db-sync-c6856" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.794078 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/764bd6b3-4473-4221-8d5c-9ed898cb677b-combined-ca-bundle\") pod \"neutron-db-sync-c6856\" (UID: \"764bd6b3-4473-4221-8d5c-9ed898cb677b\") " pod="openstack/neutron-db-sync-c6856" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.796982 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pq82\" (UniqueName: \"kubernetes.io/projected/764bd6b3-4473-4221-8d5c-9ed898cb677b-kube-api-access-8pq82\") pod \"neutron-db-sync-c6856\" (UID: \"764bd6b3-4473-4221-8d5c-9ed898cb677b\") " pod="openstack/neutron-db-sync-c6856" Oct 06 23:03:16 crc kubenswrapper[5014]: I1006 23:03:16.973310 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-c6856" Oct 06 23:03:17 crc kubenswrapper[5014]: I1006 23:03:17.456869 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-c6856"] Oct 06 23:03:17 crc kubenswrapper[5014]: I1006 23:03:17.564812 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-c6856" event={"ID":"764bd6b3-4473-4221-8d5c-9ed898cb677b","Type":"ContainerStarted","Data":"b7aea80301585550d57bff06e223e5a5b3b09b8e4e716bb5ca3dfbf2c368b077"} Oct 06 23:03:18 crc kubenswrapper[5014]: I1006 23:03:18.576635 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-c6856" event={"ID":"764bd6b3-4473-4221-8d5c-9ed898cb677b","Type":"ContainerStarted","Data":"f97961cc8dd27f250091eb7ac2d1c18a198699c90801a7bbb1b5306e89d560bb"} Oct 06 23:03:18 crc kubenswrapper[5014]: I1006 23:03:18.593995 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-c6856" podStartSLOduration=2.593973838 podStartE2EDuration="2.593973838s" podCreationTimestamp="2025-10-06 23:03:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:03:18.593600856 +0000 UTC m=+5543.886637620" watchObservedRunningTime="2025-10-06 23:03:18.593973838 +0000 UTC m=+5543.887010592" Oct 06 23:03:21 crc kubenswrapper[5014]: I1006 23:03:21.633755 5014 generic.go:334] "Generic (PLEG): container finished" podID="764bd6b3-4473-4221-8d5c-9ed898cb677b" containerID="f97961cc8dd27f250091eb7ac2d1c18a198699c90801a7bbb1b5306e89d560bb" exitCode=0 Oct 06 23:03:21 crc kubenswrapper[5014]: I1006 23:03:21.634100 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-c6856" event={"ID":"764bd6b3-4473-4221-8d5c-9ed898cb677b","Type":"ContainerDied","Data":"f97961cc8dd27f250091eb7ac2d1c18a198699c90801a7bbb1b5306e89d560bb"} Oct 06 23:03:22 crc kubenswrapper[5014]: I1006 23:03:22.965641 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-c6856" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.097190 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pq82\" (UniqueName: \"kubernetes.io/projected/764bd6b3-4473-4221-8d5c-9ed898cb677b-kube-api-access-8pq82\") pod \"764bd6b3-4473-4221-8d5c-9ed898cb677b\" (UID: \"764bd6b3-4473-4221-8d5c-9ed898cb677b\") " Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.097746 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/764bd6b3-4473-4221-8d5c-9ed898cb677b-config\") pod \"764bd6b3-4473-4221-8d5c-9ed898cb677b\" (UID: \"764bd6b3-4473-4221-8d5c-9ed898cb677b\") " Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.097825 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/764bd6b3-4473-4221-8d5c-9ed898cb677b-combined-ca-bundle\") pod \"764bd6b3-4473-4221-8d5c-9ed898cb677b\" (UID: \"764bd6b3-4473-4221-8d5c-9ed898cb677b\") " Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.104793 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/764bd6b3-4473-4221-8d5c-9ed898cb677b-kube-api-access-8pq82" (OuterVolumeSpecName: "kube-api-access-8pq82") pod "764bd6b3-4473-4221-8d5c-9ed898cb677b" (UID: "764bd6b3-4473-4221-8d5c-9ed898cb677b"). InnerVolumeSpecName "kube-api-access-8pq82". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.126317 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/764bd6b3-4473-4221-8d5c-9ed898cb677b-config" (OuterVolumeSpecName: "config") pod "764bd6b3-4473-4221-8d5c-9ed898cb677b" (UID: "764bd6b3-4473-4221-8d5c-9ed898cb677b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.136850 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/764bd6b3-4473-4221-8d5c-9ed898cb677b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "764bd6b3-4473-4221-8d5c-9ed898cb677b" (UID: "764bd6b3-4473-4221-8d5c-9ed898cb677b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.199560 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/764bd6b3-4473-4221-8d5c-9ed898cb677b-config\") on node \"crc\" DevicePath \"\"" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.199603 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/764bd6b3-4473-4221-8d5c-9ed898cb677b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.199631 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pq82\" (UniqueName: \"kubernetes.io/projected/764bd6b3-4473-4221-8d5c-9ed898cb677b-kube-api-access-8pq82\") on node \"crc\" DevicePath \"\"" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.653409 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-c6856" event={"ID":"764bd6b3-4473-4221-8d5c-9ed898cb677b","Type":"ContainerDied","Data":"b7aea80301585550d57bff06e223e5a5b3b09b8e4e716bb5ca3dfbf2c368b077"} Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.653445 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-c6856" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.653452 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b7aea80301585550d57bff06e223e5a5b3b09b8e4e716bb5ca3dfbf2c368b077" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.808953 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fbc86f6b9-m59jq"] Oct 06 23:03:23 crc kubenswrapper[5014]: E1006 23:03:23.809496 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="764bd6b3-4473-4221-8d5c-9ed898cb677b" containerName="neutron-db-sync" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.809517 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="764bd6b3-4473-4221-8d5c-9ed898cb677b" containerName="neutron-db-sync" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.809757 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="764bd6b3-4473-4221-8d5c-9ed898cb677b" containerName="neutron-db-sync" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.811280 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.830923 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fbc86f6b9-m59jq"] Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.877820 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-8556d5b956-xz6lp"] Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.880058 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.882878 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-j44h6" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.883041 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.883070 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.887062 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.900475 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8556d5b956-xz6lp"] Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.913676 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d04f43e6-ed68-4b30-af11-fc47b9187b32-ovsdbserver-nb\") pod \"dnsmasq-dns-fbc86f6b9-m59jq\" (UID: \"d04f43e6-ed68-4b30-af11-fc47b9187b32\") " pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.913760 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5tlc\" (UniqueName: \"kubernetes.io/projected/d04f43e6-ed68-4b30-af11-fc47b9187b32-kube-api-access-k5tlc\") pod \"dnsmasq-dns-fbc86f6b9-m59jq\" (UID: \"d04f43e6-ed68-4b30-af11-fc47b9187b32\") " pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.913794 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d04f43e6-ed68-4b30-af11-fc47b9187b32-dns-svc\") pod \"dnsmasq-dns-fbc86f6b9-m59jq\" (UID: \"d04f43e6-ed68-4b30-af11-fc47b9187b32\") " pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.914132 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d04f43e6-ed68-4b30-af11-fc47b9187b32-config\") pod \"dnsmasq-dns-fbc86f6b9-m59jq\" (UID: \"d04f43e6-ed68-4b30-af11-fc47b9187b32\") " pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:23 crc kubenswrapper[5014]: I1006 23:03:23.914235 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d04f43e6-ed68-4b30-af11-fc47b9187b32-ovsdbserver-sb\") pod \"dnsmasq-dns-fbc86f6b9-m59jq\" (UID: \"d04f43e6-ed68-4b30-af11-fc47b9187b32\") " pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.016001 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-ovndb-tls-certs\") pod \"neutron-8556d5b956-xz6lp\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.016520 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d04f43e6-ed68-4b30-af11-fc47b9187b32-config\") pod 
\"dnsmasq-dns-fbc86f6b9-m59jq\" (UID: \"d04f43e6-ed68-4b30-af11-fc47b9187b32\") " pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.016560 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d04f43e6-ed68-4b30-af11-fc47b9187b32-ovsdbserver-sb\") pod \"dnsmasq-dns-fbc86f6b9-m59jq\" (UID: \"d04f43e6-ed68-4b30-af11-fc47b9187b32\") " pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.016601 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-combined-ca-bundle\") pod \"neutron-8556d5b956-xz6lp\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.016662 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d04f43e6-ed68-4b30-af11-fc47b9187b32-ovsdbserver-nb\") pod \"dnsmasq-dns-fbc86f6b9-m59jq\" (UID: \"d04f43e6-ed68-4b30-af11-fc47b9187b32\") " pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.016697 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5rbf\" (UniqueName: \"kubernetes.io/projected/88fc993d-54d9-4884-ab8a-a420511f8dd9-kube-api-access-f5rbf\") pod \"neutron-8556d5b956-xz6lp\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.016750 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5tlc\" (UniqueName: \"kubernetes.io/projected/d04f43e6-ed68-4b30-af11-fc47b9187b32-kube-api-access-k5tlc\") pod \"dnsmasq-dns-fbc86f6b9-m59jq\" (UID: \"d04f43e6-ed68-4b30-af11-fc47b9187b32\") " pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.016784 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d04f43e6-ed68-4b30-af11-fc47b9187b32-dns-svc\") pod \"dnsmasq-dns-fbc86f6b9-m59jq\" (UID: \"d04f43e6-ed68-4b30-af11-fc47b9187b32\") " pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.016952 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-httpd-config\") pod \"neutron-8556d5b956-xz6lp\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.017005 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-config\") pod \"neutron-8556d5b956-xz6lp\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.017646 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d04f43e6-ed68-4b30-af11-fc47b9187b32-ovsdbserver-nb\") pod \"dnsmasq-dns-fbc86f6b9-m59jq\" 
(UID: \"d04f43e6-ed68-4b30-af11-fc47b9187b32\") " pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.017739 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d04f43e6-ed68-4b30-af11-fc47b9187b32-dns-svc\") pod \"dnsmasq-dns-fbc86f6b9-m59jq\" (UID: \"d04f43e6-ed68-4b30-af11-fc47b9187b32\") " pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.018422 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d04f43e6-ed68-4b30-af11-fc47b9187b32-ovsdbserver-sb\") pod \"dnsmasq-dns-fbc86f6b9-m59jq\" (UID: \"d04f43e6-ed68-4b30-af11-fc47b9187b32\") " pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.018972 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d04f43e6-ed68-4b30-af11-fc47b9187b32-config\") pod \"dnsmasq-dns-fbc86f6b9-m59jq\" (UID: \"d04f43e6-ed68-4b30-af11-fc47b9187b32\") " pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.036584 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5tlc\" (UniqueName: \"kubernetes.io/projected/d04f43e6-ed68-4b30-af11-fc47b9187b32-kube-api-access-k5tlc\") pod \"dnsmasq-dns-fbc86f6b9-m59jq\" (UID: \"d04f43e6-ed68-4b30-af11-fc47b9187b32\") " pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.118177 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-combined-ca-bundle\") pod \"neutron-8556d5b956-xz6lp\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.118246 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5rbf\" (UniqueName: \"kubernetes.io/projected/88fc993d-54d9-4884-ab8a-a420511f8dd9-kube-api-access-f5rbf\") pod \"neutron-8556d5b956-xz6lp\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.118311 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-httpd-config\") pod \"neutron-8556d5b956-xz6lp\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.118329 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-config\") pod \"neutron-8556d5b956-xz6lp\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.118365 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-ovndb-tls-certs\") pod \"neutron-8556d5b956-xz6lp\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.124090 
5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-ovndb-tls-certs\") pod \"neutron-8556d5b956-xz6lp\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.124220 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-httpd-config\") pod \"neutron-8556d5b956-xz6lp\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.125963 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.133241 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-combined-ca-bundle\") pod \"neutron-8556d5b956-xz6lp\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.134689 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5rbf\" (UniqueName: \"kubernetes.io/projected/88fc993d-54d9-4884-ab8a-a420511f8dd9-kube-api-access-f5rbf\") pod \"neutron-8556d5b956-xz6lp\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.137509 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-config\") pod \"neutron-8556d5b956-xz6lp\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.201585 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.602799 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fbc86f6b9-m59jq"] Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.670666 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" event={"ID":"d04f43e6-ed68-4b30-af11-fc47b9187b32","Type":"ContainerStarted","Data":"a2a87c93f7d9d240a1154c2a00bf1b8b44736806246add16a4338be6d7ccd361"} Oct 06 23:03:24 crc kubenswrapper[5014]: I1006 23:03:24.751361 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8556d5b956-xz6lp"] Oct 06 23:03:24 crc kubenswrapper[5014]: W1006 23:03:24.761682 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88fc993d_54d9_4884_ab8a_a420511f8dd9.slice/crio-3399bb4182d33a028bcb9c201eb729c15910c0cef4ecbcd3c105c3417cd8bfac WatchSource:0}: Error finding container 3399bb4182d33a028bcb9c201eb729c15910c0cef4ecbcd3c105c3417cd8bfac: Status 404 returned error can't find the container with id 3399bb4182d33a028bcb9c201eb729c15910c0cef4ecbcd3c105c3417cd8bfac Oct 06 23:03:25 crc kubenswrapper[5014]: I1006 23:03:25.492331 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016" Oct 06 23:03:25 crc kubenswrapper[5014]: I1006 23:03:25.683879 5014 generic.go:334] "Generic (PLEG): container finished" podID="d04f43e6-ed68-4b30-af11-fc47b9187b32" containerID="f7ba90e51b1d1755a11217bc273890b0bb065d32aaaa5f8fe26c6c66124550c5" exitCode=0 Oct 06 23:03:25 crc kubenswrapper[5014]: I1006 23:03:25.684274 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" event={"ID":"d04f43e6-ed68-4b30-af11-fc47b9187b32","Type":"ContainerDied","Data":"f7ba90e51b1d1755a11217bc273890b0bb065d32aaaa5f8fe26c6c66124550c5"} Oct 06 23:03:25 crc kubenswrapper[5014]: I1006 23:03:25.692876 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8556d5b956-xz6lp" event={"ID":"88fc993d-54d9-4884-ab8a-a420511f8dd9","Type":"ContainerStarted","Data":"2acfc78fde0b1aa8e199b46074cd058f9cfb5b4b7119edf3cdaf1bb32c824d17"} Oct 06 23:03:25 crc kubenswrapper[5014]: I1006 23:03:25.692920 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8556d5b956-xz6lp" event={"ID":"88fc993d-54d9-4884-ab8a-a420511f8dd9","Type":"ContainerStarted","Data":"60c4f37b3abb5e2eaa04c764613a16c09d8a80011f32afaefe246d67075bb4d8"} Oct 06 23:03:25 crc kubenswrapper[5014]: I1006 23:03:25.692931 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8556d5b956-xz6lp" event={"ID":"88fc993d-54d9-4884-ab8a-a420511f8dd9","Type":"ContainerStarted","Data":"3399bb4182d33a028bcb9c201eb729c15910c0cef4ecbcd3c105c3417cd8bfac"} Oct 06 23:03:25 crc kubenswrapper[5014]: I1006 23:03:25.693818 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:25 crc kubenswrapper[5014]: I1006 23:03:25.730177 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-8556d5b956-xz6lp" podStartSLOduration=2.730162611 podStartE2EDuration="2.730162611s" podCreationTimestamp="2025-10-06 23:03:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:03:25.728825769 +0000 
UTC m=+5551.021862513" watchObservedRunningTime="2025-10-06 23:03:25.730162611 +0000 UTC m=+5551.023199345" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.624681 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5c84bf6665-5lrrl"] Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.626674 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.633360 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.637436 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c84bf6665-5lrrl"] Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.639164 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.704191 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"b714fdc15c24e51123635f494a6a27ab187d08603b236a717c87633ed54de216"} Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.706752 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" event={"ID":"d04f43e6-ed68-4b30-af11-fc47b9187b32","Type":"ContainerStarted","Data":"406758fccbc9df4d4aeca0e3bb5bff5e03470547d86ed53232213aa6509623b1"} Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.706862 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.735956 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" podStartSLOduration=3.735936155 podStartE2EDuration="3.735936155s" podCreationTimestamp="2025-10-06 23:03:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:03:26.732979623 +0000 UTC m=+5552.026016357" watchObservedRunningTime="2025-10-06 23:03:26.735936155 +0000 UTC m=+5552.028972889" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.782678 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-ovndb-tls-certs\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.782731 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntnwp\" (UniqueName: \"kubernetes.io/projected/e21fe075-9b63-41c8-a283-7659139c4ecd-kube-api-access-ntnwp\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.782755 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-public-tls-certs\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" 
Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.782813 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-config\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.782830 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-combined-ca-bundle\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.782846 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-httpd-config\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.782954 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-internal-tls-certs\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.884158 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-internal-tls-certs\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.884238 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-ovndb-tls-certs\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.884261 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntnwp\" (UniqueName: \"kubernetes.io/projected/e21fe075-9b63-41c8-a283-7659139c4ecd-kube-api-access-ntnwp\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.884283 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-public-tls-certs\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.884316 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-config\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.884333 
5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-combined-ca-bundle\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.884347 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-httpd-config\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.891513 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-ovndb-tls-certs\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.891548 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-combined-ca-bundle\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.892054 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-config\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.892104 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-internal-tls-certs\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.893342 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-public-tls-certs\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.897177 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e21fe075-9b63-41c8-a283-7659139c4ecd-httpd-config\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.902682 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntnwp\" (UniqueName: \"kubernetes.io/projected/e21fe075-9b63-41c8-a283-7659139c4ecd-kube-api-access-ntnwp\") pod \"neutron-5c84bf6665-5lrrl\" (UID: \"e21fe075-9b63-41c8-a283-7659139c4ecd\") " pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:26 crc kubenswrapper[5014]: I1006 23:03:26.941590 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:27 crc kubenswrapper[5014]: I1006 23:03:27.501904 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c84bf6665-5lrrl"] Oct 06 23:03:27 crc kubenswrapper[5014]: I1006 23:03:27.716354 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c84bf6665-5lrrl" event={"ID":"e21fe075-9b63-41c8-a283-7659139c4ecd","Type":"ContainerStarted","Data":"14417907f6c8259db8526079840b6399b43db48b2a6475059e932e9653ad1c3e"} Oct 06 23:03:28 crc kubenswrapper[5014]: I1006 23:03:28.724848 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c84bf6665-5lrrl" event={"ID":"e21fe075-9b63-41c8-a283-7659139c4ecd","Type":"ContainerStarted","Data":"bbc9f2427398beeb4ba5df02ed5e9c3e0c22406ebc7d4537b5f88d37fe735b3c"} Oct 06 23:03:28 crc kubenswrapper[5014]: I1006 23:03:28.725136 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c84bf6665-5lrrl" event={"ID":"e21fe075-9b63-41c8-a283-7659139c4ecd","Type":"ContainerStarted","Data":"e758aed672b25f0493e22aa7b8e6743d5af66e5f3403f3bc2cc6019944800923"} Oct 06 23:03:28 crc kubenswrapper[5014]: I1006 23:03:28.725829 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:28 crc kubenswrapper[5014]: I1006 23:03:28.752072 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5c84bf6665-5lrrl" podStartSLOduration=2.752054684 podStartE2EDuration="2.752054684s" podCreationTimestamp="2025-10-06 23:03:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-06 23:03:28.740478363 +0000 UTC m=+5554.033515137" watchObservedRunningTime="2025-10-06 23:03:28.752054684 +0000 UTC m=+5554.045091418" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.128013 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-fbc86f6b9-m59jq" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.209138 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c5b46b9d5-5v5cj"] Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.209409 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" podUID="b05d4daf-38d1-4aa6-a4b3-a623bad4543f" containerName="dnsmasq-dns" containerID="cri-o://0e89ed657f6058e866971770d6b296c9502177eeb9d334b7d5a5c36a3d587476" gracePeriod=10 Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.728560 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.786529 5014 generic.go:334] "Generic (PLEG): container finished" podID="b05d4daf-38d1-4aa6-a4b3-a623bad4543f" containerID="0e89ed657f6058e866971770d6b296c9502177eeb9d334b7d5a5c36a3d587476" exitCode=0 Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.786569 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" event={"ID":"b05d4daf-38d1-4aa6-a4b3-a623bad4543f","Type":"ContainerDied","Data":"0e89ed657f6058e866971770d6b296c9502177eeb9d334b7d5a5c36a3d587476"} Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.786603 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" event={"ID":"b05d4daf-38d1-4aa6-a4b3-a623bad4543f","Type":"ContainerDied","Data":"3d0f5a7d6f1ee7e5856637896cb36b9d4bda9fa8778a075e55f9351409a74589"} Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.786632 5014 scope.go:117] "RemoveContainer" containerID="0e89ed657f6058e866971770d6b296c9502177eeb9d334b7d5a5c36a3d587476" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.786743 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c5b46b9d5-5v5cj" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.814361 5014 scope.go:117] "RemoveContainer" containerID="cd0cc088dd0b537b2125fb8069d7f0ebf10ced483eb09460b1652461e763fa30" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.836975 5014 scope.go:117] "RemoveContainer" containerID="0e89ed657f6058e866971770d6b296c9502177eeb9d334b7d5a5c36a3d587476" Oct 06 23:03:34 crc kubenswrapper[5014]: E1006 23:03:34.837386 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e89ed657f6058e866971770d6b296c9502177eeb9d334b7d5a5c36a3d587476\": container with ID starting with 0e89ed657f6058e866971770d6b296c9502177eeb9d334b7d5a5c36a3d587476 not found: ID does not exist" containerID="0e89ed657f6058e866971770d6b296c9502177eeb9d334b7d5a5c36a3d587476" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.837446 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e89ed657f6058e866971770d6b296c9502177eeb9d334b7d5a5c36a3d587476"} err="failed to get container status \"0e89ed657f6058e866971770d6b296c9502177eeb9d334b7d5a5c36a3d587476\": rpc error: code = NotFound desc = could not find container \"0e89ed657f6058e866971770d6b296c9502177eeb9d334b7d5a5c36a3d587476\": container with ID starting with 0e89ed657f6058e866971770d6b296c9502177eeb9d334b7d5a5c36a3d587476 not found: ID does not exist" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.837474 5014 scope.go:117] "RemoveContainer" containerID="cd0cc088dd0b537b2125fb8069d7f0ebf10ced483eb09460b1652461e763fa30" Oct 06 23:03:34 crc kubenswrapper[5014]: E1006 23:03:34.837810 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd0cc088dd0b537b2125fb8069d7f0ebf10ced483eb09460b1652461e763fa30\": container with ID starting with cd0cc088dd0b537b2125fb8069d7f0ebf10ced483eb09460b1652461e763fa30 not found: ID does not exist" containerID="cd0cc088dd0b537b2125fb8069d7f0ebf10ced483eb09460b1652461e763fa30" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.837835 5014 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"cd0cc088dd0b537b2125fb8069d7f0ebf10ced483eb09460b1652461e763fa30"} err="failed to get container status \"cd0cc088dd0b537b2125fb8069d7f0ebf10ced483eb09460b1652461e763fa30\": rpc error: code = NotFound desc = could not find container \"cd0cc088dd0b537b2125fb8069d7f0ebf10ced483eb09460b1652461e763fa30\": container with ID starting with cd0cc088dd0b537b2125fb8069d7f0ebf10ced483eb09460b1652461e763fa30 not found: ID does not exist" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.839509 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqldn\" (UniqueName: \"kubernetes.io/projected/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-kube-api-access-gqldn\") pod \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.839656 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-ovsdbserver-sb\") pod \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.839722 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-config\") pod \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.839771 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-ovsdbserver-nb\") pod \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.839794 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-dns-svc\") pod \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\" (UID: \"b05d4daf-38d1-4aa6-a4b3-a623bad4543f\") " Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.847294 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-kube-api-access-gqldn" (OuterVolumeSpecName: "kube-api-access-gqldn") pod "b05d4daf-38d1-4aa6-a4b3-a623bad4543f" (UID: "b05d4daf-38d1-4aa6-a4b3-a623bad4543f"). InnerVolumeSpecName "kube-api-access-gqldn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.890509 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b05d4daf-38d1-4aa6-a4b3-a623bad4543f" (UID: "b05d4daf-38d1-4aa6-a4b3-a623bad4543f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.890535 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-config" (OuterVolumeSpecName: "config") pod "b05d4daf-38d1-4aa6-a4b3-a623bad4543f" (UID: "b05d4daf-38d1-4aa6-a4b3-a623bad4543f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.890553 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b05d4daf-38d1-4aa6-a4b3-a623bad4543f" (UID: "b05d4daf-38d1-4aa6-a4b3-a623bad4543f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.899246 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b05d4daf-38d1-4aa6-a4b3-a623bad4543f" (UID: "b05d4daf-38d1-4aa6-a4b3-a623bad4543f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.942339 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.942381 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-config\") on node \"crc\" DevicePath \"\"" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.942393 5014 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.942401 5014 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 06 23:03:34 crc kubenswrapper[5014]: I1006 23:03:34.942411 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqldn\" (UniqueName: \"kubernetes.io/projected/b05d4daf-38d1-4aa6-a4b3-a623bad4543f-kube-api-access-gqldn\") on node \"crc\" DevicePath \"\"" Oct 06 23:03:35 crc kubenswrapper[5014]: I1006 23:03:35.118352 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c5b46b9d5-5v5cj"] Oct 06 23:03:35 crc kubenswrapper[5014]: I1006 23:03:35.125206 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-c5b46b9d5-5v5cj"] Oct 06 23:03:35 crc kubenswrapper[5014]: I1006 23:03:35.500295 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b05d4daf-38d1-4aa6-a4b3-a623bad4543f" path="/var/lib/kubelet/pods/b05d4daf-38d1-4aa6-a4b3-a623bad4543f/volumes" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.188562 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lc5wn"] Oct 06 23:03:54 crc kubenswrapper[5014]: E1006 23:03:54.189545 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b05d4daf-38d1-4aa6-a4b3-a623bad4543f" containerName="dnsmasq-dns" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.189559 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="b05d4daf-38d1-4aa6-a4b3-a623bad4543f" containerName="dnsmasq-dns" Oct 06 23:03:54 crc kubenswrapper[5014]: E1006 23:03:54.189586 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b05d4daf-38d1-4aa6-a4b3-a623bad4543f" 
containerName="init" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.189594 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="b05d4daf-38d1-4aa6-a4b3-a623bad4543f" containerName="init" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.189819 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="b05d4daf-38d1-4aa6-a4b3-a623bad4543f" containerName="dnsmasq-dns" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.191856 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.205395 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lc5wn"] Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.217756 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.239850 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-utilities\") pod \"redhat-operators-lc5wn\" (UID: \"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b\") " pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.240069 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrmdd\" (UniqueName: \"kubernetes.io/projected/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-kube-api-access-mrmdd\") pod \"redhat-operators-lc5wn\" (UID: \"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b\") " pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.240492 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-catalog-content\") pod \"redhat-operators-lc5wn\" (UID: \"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b\") " pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.342169 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-catalog-content\") pod \"redhat-operators-lc5wn\" (UID: \"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b\") " pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.342240 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-utilities\") pod \"redhat-operators-lc5wn\" (UID: \"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b\") " pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.342298 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrmdd\" (UniqueName: \"kubernetes.io/projected/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-kube-api-access-mrmdd\") pod \"redhat-operators-lc5wn\" (UID: \"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b\") " pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.342968 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-catalog-content\") pod \"redhat-operators-lc5wn\" (UID: \"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b\") " pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.343420 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-utilities\") pod \"redhat-operators-lc5wn\" (UID: \"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b\") " pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.370676 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrmdd\" (UniqueName: \"kubernetes.io/projected/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-kube-api-access-mrmdd\") pod \"redhat-operators-lc5wn\" (UID: \"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b\") " pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:03:54 crc kubenswrapper[5014]: I1006 23:03:54.518034 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:03:55 crc kubenswrapper[5014]: I1006 23:03:55.047724 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lc5wn"] Oct 06 23:03:56 crc kubenswrapper[5014]: I1006 23:03:56.040212 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lc5wn" event={"ID":"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b","Type":"ContainerDied","Data":"a725cb63b44ec91e65ba8ec982756acc1de532875f41dcfc3597e5374c4d4d91"} Oct 06 23:03:56 crc kubenswrapper[5014]: I1006 23:03:56.040525 5014 generic.go:334] "Generic (PLEG): container finished" podID="bc2c6ea4-dd8b-46b0-86aa-4047bc93979b" containerID="a725cb63b44ec91e65ba8ec982756acc1de532875f41dcfc3597e5374c4d4d91" exitCode=0 Oct 06 23:03:56 crc kubenswrapper[5014]: I1006 23:03:56.040571 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lc5wn" event={"ID":"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b","Type":"ContainerStarted","Data":"6a489d072e895285ce558c110aba1e433d9d4269260f49f416f27ef4201744bc"} Oct 06 23:03:56 crc kubenswrapper[5014]: I1006 23:03:56.047576 5014 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 06 23:03:56 crc kubenswrapper[5014]: I1006 23:03:56.982378 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5c84bf6665-5lrrl" Oct 06 23:03:57 crc kubenswrapper[5014]: I1006 23:03:57.079282 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lc5wn" event={"ID":"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b","Type":"ContainerStarted","Data":"fc26fd8aa872f69a2a15559698fae4ebfc14af05965725f38f5235401230a038"} Oct 06 23:03:57 crc kubenswrapper[5014]: I1006 23:03:57.096346 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8556d5b956-xz6lp"] Oct 06 23:03:57 crc kubenswrapper[5014]: I1006 23:03:57.096825 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-8556d5b956-xz6lp" podUID="88fc993d-54d9-4884-ab8a-a420511f8dd9" containerName="neutron-api" containerID="cri-o://60c4f37b3abb5e2eaa04c764613a16c09d8a80011f32afaefe246d67075bb4d8" gracePeriod=30 Oct 06 23:03:57 crc kubenswrapper[5014]: I1006 23:03:57.096889 5014 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/neutron-8556d5b956-xz6lp" podUID="88fc993d-54d9-4884-ab8a-a420511f8dd9" containerName="neutron-httpd" containerID="cri-o://2acfc78fde0b1aa8e199b46074cd058f9cfb5b4b7119edf3cdaf1bb32c824d17" gracePeriod=30 Oct 06 23:03:58 crc kubenswrapper[5014]: I1006 23:03:58.091098 5014 generic.go:334] "Generic (PLEG): container finished" podID="88fc993d-54d9-4884-ab8a-a420511f8dd9" containerID="2acfc78fde0b1aa8e199b46074cd058f9cfb5b4b7119edf3cdaf1bb32c824d17" exitCode=0 Oct 06 23:03:58 crc kubenswrapper[5014]: I1006 23:03:58.094267 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8556d5b956-xz6lp" event={"ID":"88fc993d-54d9-4884-ab8a-a420511f8dd9","Type":"ContainerDied","Data":"2acfc78fde0b1aa8e199b46074cd058f9cfb5b4b7119edf3cdaf1bb32c824d17"} Oct 06 23:03:58 crc kubenswrapper[5014]: I1006 23:03:58.097662 5014 generic.go:334] "Generic (PLEG): container finished" podID="bc2c6ea4-dd8b-46b0-86aa-4047bc93979b" containerID="fc26fd8aa872f69a2a15559698fae4ebfc14af05965725f38f5235401230a038" exitCode=0 Oct 06 23:03:58 crc kubenswrapper[5014]: I1006 23:03:58.102254 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lc5wn" event={"ID":"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b","Type":"ContainerDied","Data":"fc26fd8aa872f69a2a15559698fae4ebfc14af05965725f38f5235401230a038"} Oct 06 23:03:59 crc kubenswrapper[5014]: I1006 23:03:59.113087 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lc5wn" event={"ID":"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b","Type":"ContainerStarted","Data":"e06e747f35236f8ab8aed8d695b18c76830d26bb7596364aefd344b9c3af7479"} Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.633144 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.655056 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lc5wn" podStartSLOduration=4.159547758 podStartE2EDuration="6.655041786s" podCreationTimestamp="2025-10-06 23:03:54 +0000 UTC" firstStartedPulling="2025-10-06 23:03:56.047327239 +0000 UTC m=+5581.340363983" lastFinishedPulling="2025-10-06 23:03:58.542821247 +0000 UTC m=+5583.835858011" observedRunningTime="2025-10-06 23:03:59.153331127 +0000 UTC m=+5584.446367861" watchObservedRunningTime="2025-10-06 23:04:00.655041786 +0000 UTC m=+5585.948078520" Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.664077 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5rbf\" (UniqueName: \"kubernetes.io/projected/88fc993d-54d9-4884-ab8a-a420511f8dd9-kube-api-access-f5rbf\") pod \"88fc993d-54d9-4884-ab8a-a420511f8dd9\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.664447 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-config\") pod \"88fc993d-54d9-4884-ab8a-a420511f8dd9\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.664492 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-httpd-config\") pod \"88fc993d-54d9-4884-ab8a-a420511f8dd9\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.664517 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-combined-ca-bundle\") pod \"88fc993d-54d9-4884-ab8a-a420511f8dd9\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.664561 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-ovndb-tls-certs\") pod \"88fc993d-54d9-4884-ab8a-a420511f8dd9\" (UID: \"88fc993d-54d9-4884-ab8a-a420511f8dd9\") " Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.676809 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88fc993d-54d9-4884-ab8a-a420511f8dd9-kube-api-access-f5rbf" (OuterVolumeSpecName: "kube-api-access-f5rbf") pod "88fc993d-54d9-4884-ab8a-a420511f8dd9" (UID: "88fc993d-54d9-4884-ab8a-a420511f8dd9"). InnerVolumeSpecName "kube-api-access-f5rbf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.681753 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "88fc993d-54d9-4884-ab8a-a420511f8dd9" (UID: "88fc993d-54d9-4884-ab8a-a420511f8dd9"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.730334 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-config" (OuterVolumeSpecName: "config") pod "88fc993d-54d9-4884-ab8a-a420511f8dd9" (UID: "88fc993d-54d9-4884-ab8a-a420511f8dd9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.754013 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "88fc993d-54d9-4884-ab8a-a420511f8dd9" (UID: "88fc993d-54d9-4884-ab8a-a420511f8dd9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.766786 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5rbf\" (UniqueName: \"kubernetes.io/projected/88fc993d-54d9-4884-ab8a-a420511f8dd9-kube-api-access-f5rbf\") on node \"crc\" DevicePath \"\"" Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.766822 5014 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-config\") on node \"crc\" DevicePath \"\"" Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.766834 5014 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-httpd-config\") on node \"crc\" DevicePath \"\"" Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.766844 5014 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.776373 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "88fc993d-54d9-4884-ab8a-a420511f8dd9" (UID: "88fc993d-54d9-4884-ab8a-a420511f8dd9"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 06 23:04:00 crc kubenswrapper[5014]: I1006 23:04:00.869174 5014 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/88fc993d-54d9-4884-ab8a-a420511f8dd9-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 06 23:04:01 crc kubenswrapper[5014]: I1006 23:04:01.133229 5014 generic.go:334] "Generic (PLEG): container finished" podID="88fc993d-54d9-4884-ab8a-a420511f8dd9" containerID="60c4f37b3abb5e2eaa04c764613a16c09d8a80011f32afaefe246d67075bb4d8" exitCode=0 Oct 06 23:04:01 crc kubenswrapper[5014]: I1006 23:04:01.133303 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8556d5b956-xz6lp" event={"ID":"88fc993d-54d9-4884-ab8a-a420511f8dd9","Type":"ContainerDied","Data":"60c4f37b3abb5e2eaa04c764613a16c09d8a80011f32afaefe246d67075bb4d8"} Oct 06 23:04:01 crc kubenswrapper[5014]: I1006 23:04:01.133316 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8556d5b956-xz6lp" Oct 06 23:04:01 crc kubenswrapper[5014]: I1006 23:04:01.133347 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8556d5b956-xz6lp" event={"ID":"88fc993d-54d9-4884-ab8a-a420511f8dd9","Type":"ContainerDied","Data":"3399bb4182d33a028bcb9c201eb729c15910c0cef4ecbcd3c105c3417cd8bfac"} Oct 06 23:04:01 crc kubenswrapper[5014]: I1006 23:04:01.133366 5014 scope.go:117] "RemoveContainer" containerID="2acfc78fde0b1aa8e199b46074cd058f9cfb5b4b7119edf3cdaf1bb32c824d17" Oct 06 23:04:01 crc kubenswrapper[5014]: I1006 23:04:01.211993 5014 scope.go:117] "RemoveContainer" containerID="60c4f37b3abb5e2eaa04c764613a16c09d8a80011f32afaefe246d67075bb4d8" Oct 06 23:04:01 crc kubenswrapper[5014]: I1006 23:04:01.217853 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8556d5b956-xz6lp"] Oct 06 23:04:01 crc kubenswrapper[5014]: I1006 23:04:01.224561 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-8556d5b956-xz6lp"] Oct 06 23:04:01 crc kubenswrapper[5014]: I1006 23:04:01.247989 5014 scope.go:117] "RemoveContainer" containerID="2acfc78fde0b1aa8e199b46074cd058f9cfb5b4b7119edf3cdaf1bb32c824d17" Oct 06 23:04:01 crc kubenswrapper[5014]: E1006 23:04:01.248523 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2acfc78fde0b1aa8e199b46074cd058f9cfb5b4b7119edf3cdaf1bb32c824d17\": container with ID starting with 2acfc78fde0b1aa8e199b46074cd058f9cfb5b4b7119edf3cdaf1bb32c824d17 not found: ID does not exist" containerID="2acfc78fde0b1aa8e199b46074cd058f9cfb5b4b7119edf3cdaf1bb32c824d17" Oct 06 23:04:01 crc kubenswrapper[5014]: I1006 23:04:01.248553 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2acfc78fde0b1aa8e199b46074cd058f9cfb5b4b7119edf3cdaf1bb32c824d17"} err="failed to get container status \"2acfc78fde0b1aa8e199b46074cd058f9cfb5b4b7119edf3cdaf1bb32c824d17\": rpc error: code = NotFound desc = could not find container \"2acfc78fde0b1aa8e199b46074cd058f9cfb5b4b7119edf3cdaf1bb32c824d17\": container with ID starting with 2acfc78fde0b1aa8e199b46074cd058f9cfb5b4b7119edf3cdaf1bb32c824d17 not found: ID does not exist" Oct 06 23:04:01 crc kubenswrapper[5014]: I1006 23:04:01.248574 5014 scope.go:117] "RemoveContainer" containerID="60c4f37b3abb5e2eaa04c764613a16c09d8a80011f32afaefe246d67075bb4d8" Oct 06 23:04:01 crc kubenswrapper[5014]: E1006 23:04:01.248958 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60c4f37b3abb5e2eaa04c764613a16c09d8a80011f32afaefe246d67075bb4d8\": container with ID starting with 60c4f37b3abb5e2eaa04c764613a16c09d8a80011f32afaefe246d67075bb4d8 not found: ID does not exist" containerID="60c4f37b3abb5e2eaa04c764613a16c09d8a80011f32afaefe246d67075bb4d8" Oct 06 23:04:01 crc kubenswrapper[5014]: I1006 23:04:01.248982 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60c4f37b3abb5e2eaa04c764613a16c09d8a80011f32afaefe246d67075bb4d8"} err="failed to get container status \"60c4f37b3abb5e2eaa04c764613a16c09d8a80011f32afaefe246d67075bb4d8\": rpc error: code = NotFound desc = could not find container \"60c4f37b3abb5e2eaa04c764613a16c09d8a80011f32afaefe246d67075bb4d8\": container with ID starting with 60c4f37b3abb5e2eaa04c764613a16c09d8a80011f32afaefe246d67075bb4d8 not found: ID does not exist" Oct 06 23:04:01 crc 
kubenswrapper[5014]: I1006 23:04:01.499158 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88fc993d-54d9-4884-ab8a-a420511f8dd9" path="/var/lib/kubelet/pods/88fc993d-54d9-4884-ab8a-a420511f8dd9/volumes" Oct 06 23:04:04 crc kubenswrapper[5014]: I1006 23:04:04.519225 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:04:04 crc kubenswrapper[5014]: I1006 23:04:04.519582 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:04:04 crc kubenswrapper[5014]: I1006 23:04:04.598880 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:04:05 crc kubenswrapper[5014]: I1006 23:04:05.251832 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:04:05 crc kubenswrapper[5014]: I1006 23:04:05.322879 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lc5wn"] Oct 06 23:04:07 crc kubenswrapper[5014]: I1006 23:04:07.201406 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lc5wn" podUID="bc2c6ea4-dd8b-46b0-86aa-4047bc93979b" containerName="registry-server" containerID="cri-o://e06e747f35236f8ab8aed8d695b18c76830d26bb7596364aefd344b9c3af7479" gracePeriod=2 Oct 06 23:04:08 crc kubenswrapper[5014]: I1006 23:04:08.220325 5014 generic.go:334] "Generic (PLEG): container finished" podID="bc2c6ea4-dd8b-46b0-86aa-4047bc93979b" containerID="e06e747f35236f8ab8aed8d695b18c76830d26bb7596364aefd344b9c3af7479" exitCode=0 Oct 06 23:04:08 crc kubenswrapper[5014]: I1006 23:04:08.220409 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lc5wn" event={"ID":"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b","Type":"ContainerDied","Data":"e06e747f35236f8ab8aed8d695b18c76830d26bb7596364aefd344b9c3af7479"} Oct 06 23:04:08 crc kubenswrapper[5014]: I1006 23:04:08.847339 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.047669 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mrmdd\" (UniqueName: \"kubernetes.io/projected/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-kube-api-access-mrmdd\") pod \"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b\" (UID: \"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b\") " Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.048105 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-catalog-content\") pod \"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b\" (UID: \"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b\") " Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.049978 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-utilities\") pod \"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b\" (UID: \"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b\") " Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.051207 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-utilities" (OuterVolumeSpecName: "utilities") pod "bc2c6ea4-dd8b-46b0-86aa-4047bc93979b" (UID: "bc2c6ea4-dd8b-46b0-86aa-4047bc93979b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.057940 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-kube-api-access-mrmdd" (OuterVolumeSpecName: "kube-api-access-mrmdd") pod "bc2c6ea4-dd8b-46b0-86aa-4047bc93979b" (UID: "bc2c6ea4-dd8b-46b0-86aa-4047bc93979b"). InnerVolumeSpecName "kube-api-access-mrmdd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.154336 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bc2c6ea4-dd8b-46b0-86aa-4047bc93979b" (UID: "bc2c6ea4-dd8b-46b0-86aa-4047bc93979b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.154755 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.154857 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mrmdd\" (UniqueName: \"kubernetes.io/projected/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-kube-api-access-mrmdd\") on node \"crc\" DevicePath \"\"" Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.154901 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.233325 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lc5wn" event={"ID":"bc2c6ea4-dd8b-46b0-86aa-4047bc93979b","Type":"ContainerDied","Data":"6a489d072e895285ce558c110aba1e433d9d4269260f49f416f27ef4201744bc"} Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.233424 5014 scope.go:117] "RemoveContainer" containerID="e06e747f35236f8ab8aed8d695b18c76830d26bb7596364aefd344b9c3af7479" Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.233507 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lc5wn" Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.281854 5014 scope.go:117] "RemoveContainer" containerID="fc26fd8aa872f69a2a15559698fae4ebfc14af05965725f38f5235401230a038" Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.292126 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lc5wn"] Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.299400 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lc5wn"] Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.308233 5014 scope.go:117] "RemoveContainer" containerID="a725cb63b44ec91e65ba8ec982756acc1de532875f41dcfc3597e5374c4d4d91" Oct 06 23:04:09 crc kubenswrapper[5014]: I1006 23:04:09.496717 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc2c6ea4-dd8b-46b0-86aa-4047bc93979b" path="/var/lib/kubelet/pods/bc2c6ea4-dd8b-46b0-86aa-4047bc93979b/volumes" Oct 06 23:05:03 crc kubenswrapper[5014]: I1006 23:05:03.941743 5014 scope.go:117] "RemoveContainer" containerID="b14068296ce0d8948d483bf3157530f1642f40b4eb8657f880194f351d7cb1f8" Oct 06 23:05:03 crc kubenswrapper[5014]: I1006 23:05:03.976765 5014 scope.go:117] "RemoveContainer" containerID="2fc390bd6833a786bc3cfeaee692a0e4f2b6a470c195732019b0ae98427ae5c6" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.275723 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bhbtc"] Oct 06 23:05:07 crc kubenswrapper[5014]: E1006 23:05:07.276670 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc2c6ea4-dd8b-46b0-86aa-4047bc93979b" containerName="extract-utilities" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.276694 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc2c6ea4-dd8b-46b0-86aa-4047bc93979b" containerName="extract-utilities" Oct 06 23:05:07 crc kubenswrapper[5014]: E1006 23:05:07.276724 5014 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="bc2c6ea4-dd8b-46b0-86aa-4047bc93979b" containerName="extract-content" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.276736 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc2c6ea4-dd8b-46b0-86aa-4047bc93979b" containerName="extract-content" Oct 06 23:05:07 crc kubenswrapper[5014]: E1006 23:05:07.276755 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc2c6ea4-dd8b-46b0-86aa-4047bc93979b" containerName="registry-server" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.276770 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc2c6ea4-dd8b-46b0-86aa-4047bc93979b" containerName="registry-server" Oct 06 23:05:07 crc kubenswrapper[5014]: E1006 23:05:07.276810 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88fc993d-54d9-4884-ab8a-a420511f8dd9" containerName="neutron-api" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.276822 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="88fc993d-54d9-4884-ab8a-a420511f8dd9" containerName="neutron-api" Oct 06 23:05:07 crc kubenswrapper[5014]: E1006 23:05:07.276844 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88fc993d-54d9-4884-ab8a-a420511f8dd9" containerName="neutron-httpd" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.276855 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="88fc993d-54d9-4884-ab8a-a420511f8dd9" containerName="neutron-httpd" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.277150 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="88fc993d-54d9-4884-ab8a-a420511f8dd9" containerName="neutron-httpd" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.277174 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="88fc993d-54d9-4884-ab8a-a420511f8dd9" containerName="neutron-api" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.277196 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc2c6ea4-dd8b-46b0-86aa-4047bc93979b" containerName="registry-server" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.283411 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.295201 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bhbtc"] Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.424482 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0fc5d59-3772-4f0d-941e-711604760b3f-utilities\") pod \"community-operators-bhbtc\" (UID: \"d0fc5d59-3772-4f0d-941e-711604760b3f\") " pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.424534 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0fc5d59-3772-4f0d-941e-711604760b3f-catalog-content\") pod \"community-operators-bhbtc\" (UID: \"d0fc5d59-3772-4f0d-941e-711604760b3f\") " pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.424577 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpkm7\" (UniqueName: \"kubernetes.io/projected/d0fc5d59-3772-4f0d-941e-711604760b3f-kube-api-access-mpkm7\") pod \"community-operators-bhbtc\" (UID: \"d0fc5d59-3772-4f0d-941e-711604760b3f\") " pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.525860 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0fc5d59-3772-4f0d-941e-711604760b3f-utilities\") pod \"community-operators-bhbtc\" (UID: \"d0fc5d59-3772-4f0d-941e-711604760b3f\") " pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.525893 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0fc5d59-3772-4f0d-941e-711604760b3f-catalog-content\") pod \"community-operators-bhbtc\" (UID: \"d0fc5d59-3772-4f0d-941e-711604760b3f\") " pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.525924 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpkm7\" (UniqueName: \"kubernetes.io/projected/d0fc5d59-3772-4f0d-941e-711604760b3f-kube-api-access-mpkm7\") pod \"community-operators-bhbtc\" (UID: \"d0fc5d59-3772-4f0d-941e-711604760b3f\") " pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.526404 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0fc5d59-3772-4f0d-941e-711604760b3f-utilities\") pod \"community-operators-bhbtc\" (UID: \"d0fc5d59-3772-4f0d-941e-711604760b3f\") " pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.527483 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0fc5d59-3772-4f0d-941e-711604760b3f-catalog-content\") pod \"community-operators-bhbtc\" (UID: \"d0fc5d59-3772-4f0d-941e-711604760b3f\") " pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.544052 5014 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mpkm7\" (UniqueName: \"kubernetes.io/projected/d0fc5d59-3772-4f0d-941e-711604760b3f-kube-api-access-mpkm7\") pod \"community-operators-bhbtc\" (UID: \"d0fc5d59-3772-4f0d-941e-711604760b3f\") " pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:07 crc kubenswrapper[5014]: I1006 23:05:07.626096 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:08 crc kubenswrapper[5014]: I1006 23:05:08.089432 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bhbtc"] Oct 06 23:05:08 crc kubenswrapper[5014]: E1006 23:05:08.540742 5014 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd0fc5d59_3772_4f0d_941e_711604760b3f.slice/crio-conmon-c8b03b076b502f7737cfb060424a1de39b6bfb54e2baf2bd5419bfebd6b89fd1.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd0fc5d59_3772_4f0d_941e_711604760b3f.slice/crio-c8b03b076b502f7737cfb060424a1de39b6bfb54e2baf2bd5419bfebd6b89fd1.scope\": RecentStats: unable to find data in memory cache]" Oct 06 23:05:08 crc kubenswrapper[5014]: I1006 23:05:08.913988 5014 generic.go:334] "Generic (PLEG): container finished" podID="d0fc5d59-3772-4f0d-941e-711604760b3f" containerID="c8b03b076b502f7737cfb060424a1de39b6bfb54e2baf2bd5419bfebd6b89fd1" exitCode=0 Oct 06 23:05:08 crc kubenswrapper[5014]: I1006 23:05:08.914072 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bhbtc" event={"ID":"d0fc5d59-3772-4f0d-941e-711604760b3f","Type":"ContainerDied","Data":"c8b03b076b502f7737cfb060424a1de39b6bfb54e2baf2bd5419bfebd6b89fd1"} Oct 06 23:05:08 crc kubenswrapper[5014]: I1006 23:05:08.914502 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bhbtc" event={"ID":"d0fc5d59-3772-4f0d-941e-711604760b3f","Type":"ContainerStarted","Data":"416cf0159a1bd79cb27362d1c6f9ba436d6d03df57bd9c3a23b2994f636fa3dd"} Oct 06 23:05:09 crc kubenswrapper[5014]: I1006 23:05:09.929754 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bhbtc" event={"ID":"d0fc5d59-3772-4f0d-941e-711604760b3f","Type":"ContainerStarted","Data":"3689b2c597bddb9bcf79d9b6efcbe012b095af713d0cf96e9c9fbceacb211f8b"} Oct 06 23:05:10 crc kubenswrapper[5014]: I1006 23:05:10.612385 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-w49rt/must-gather-9jfkz"] Oct 06 23:05:10 crc kubenswrapper[5014]: I1006 23:05:10.613947 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-w49rt/must-gather-9jfkz" Oct 06 23:05:10 crc kubenswrapper[5014]: I1006 23:05:10.617193 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-w49rt"/"openshift-service-ca.crt" Oct 06 23:05:10 crc kubenswrapper[5014]: I1006 23:05:10.617384 5014 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-w49rt"/"default-dockercfg-2tpqm" Oct 06 23:05:10 crc kubenswrapper[5014]: I1006 23:05:10.620263 5014 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-w49rt"/"kube-root-ca.crt" Oct 06 23:05:10 crc kubenswrapper[5014]: I1006 23:05:10.621878 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-w49rt/must-gather-9jfkz"] Oct 06 23:05:10 crc kubenswrapper[5014]: I1006 23:05:10.783445 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59-must-gather-output\") pod \"must-gather-9jfkz\" (UID: \"37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59\") " pod="openshift-must-gather-w49rt/must-gather-9jfkz" Oct 06 23:05:10 crc kubenswrapper[5014]: I1006 23:05:10.783854 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhk8s\" (UniqueName: \"kubernetes.io/projected/37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59-kube-api-access-xhk8s\") pod \"must-gather-9jfkz\" (UID: \"37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59\") " pod="openshift-must-gather-w49rt/must-gather-9jfkz" Oct 06 23:05:10 crc kubenswrapper[5014]: I1006 23:05:10.886255 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhk8s\" (UniqueName: \"kubernetes.io/projected/37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59-kube-api-access-xhk8s\") pod \"must-gather-9jfkz\" (UID: \"37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59\") " pod="openshift-must-gather-w49rt/must-gather-9jfkz" Oct 06 23:05:10 crc kubenswrapper[5014]: I1006 23:05:10.887454 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59-must-gather-output\") pod \"must-gather-9jfkz\" (UID: \"37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59\") " pod="openshift-must-gather-w49rt/must-gather-9jfkz" Oct 06 23:05:10 crc kubenswrapper[5014]: I1006 23:05:10.887910 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59-must-gather-output\") pod \"must-gather-9jfkz\" (UID: \"37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59\") " pod="openshift-must-gather-w49rt/must-gather-9jfkz" Oct 06 23:05:10 crc kubenswrapper[5014]: I1006 23:05:10.922501 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhk8s\" (UniqueName: \"kubernetes.io/projected/37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59-kube-api-access-xhk8s\") pod \"must-gather-9jfkz\" (UID: \"37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59\") " pod="openshift-must-gather-w49rt/must-gather-9jfkz" Oct 06 23:05:10 crc kubenswrapper[5014]: I1006 23:05:10.932652 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-w49rt/must-gather-9jfkz" Oct 06 23:05:10 crc kubenswrapper[5014]: I1006 23:05:10.942101 5014 generic.go:334] "Generic (PLEG): container finished" podID="d0fc5d59-3772-4f0d-941e-711604760b3f" containerID="3689b2c597bddb9bcf79d9b6efcbe012b095af713d0cf96e9c9fbceacb211f8b" exitCode=0 Oct 06 23:05:10 crc kubenswrapper[5014]: I1006 23:05:10.942146 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bhbtc" event={"ID":"d0fc5d59-3772-4f0d-941e-711604760b3f","Type":"ContainerDied","Data":"3689b2c597bddb9bcf79d9b6efcbe012b095af713d0cf96e9c9fbceacb211f8b"} Oct 06 23:05:11 crc kubenswrapper[5014]: I1006 23:05:11.395541 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-w49rt/must-gather-9jfkz"] Oct 06 23:05:11 crc kubenswrapper[5014]: I1006 23:05:11.971772 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bhbtc" event={"ID":"d0fc5d59-3772-4f0d-941e-711604760b3f","Type":"ContainerStarted","Data":"dcce5cd2605ff9cac2e52e442ccb3271433aed11b96a5b38be42de529edc359c"} Oct 06 23:05:11 crc kubenswrapper[5014]: I1006 23:05:11.974793 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w49rt/must-gather-9jfkz" event={"ID":"37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59","Type":"ContainerStarted","Data":"e04a9e046a4a94c73900563921f28eae1d2607e5c2d0a0f8cce4d49c2b267859"} Oct 06 23:05:11 crc kubenswrapper[5014]: I1006 23:05:11.998024 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bhbtc" podStartSLOduration=2.523293394 podStartE2EDuration="4.998004834s" podCreationTimestamp="2025-10-06 23:05:07 +0000 UTC" firstStartedPulling="2025-10-06 23:05:08.917688168 +0000 UTC m=+5654.210724942" lastFinishedPulling="2025-10-06 23:05:11.392399648 +0000 UTC m=+5656.685436382" observedRunningTime="2025-10-06 23:05:11.997185229 +0000 UTC m=+5657.290221973" watchObservedRunningTime="2025-10-06 23:05:11.998004834 +0000 UTC m=+5657.291041568" Oct 06 23:05:16 crc kubenswrapper[5014]: I1006 23:05:16.043258 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w49rt/must-gather-9jfkz" event={"ID":"37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59","Type":"ContainerStarted","Data":"1b18d7ee9413fd18d6b02b039b21e66bbffd7e81f17e60a2f68177d33adfdff2"} Oct 06 23:05:16 crc kubenswrapper[5014]: I1006 23:05:16.043784 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w49rt/must-gather-9jfkz" event={"ID":"37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59","Type":"ContainerStarted","Data":"31d0dd82677de620590608d7659fb0a431036e9d7c0f3553decc48d6858ab0cd"} Oct 06 23:05:16 crc kubenswrapper[5014]: I1006 23:05:16.073863 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-w49rt/must-gather-9jfkz" podStartSLOduration=2.131722378 podStartE2EDuration="6.073846906s" podCreationTimestamp="2025-10-06 23:05:10 +0000 UTC" firstStartedPulling="2025-10-06 23:05:11.419949817 +0000 UTC m=+5656.712986551" lastFinishedPulling="2025-10-06 23:05:15.362074345 +0000 UTC m=+5660.655111079" observedRunningTime="2025-10-06 23:05:16.072734051 +0000 UTC m=+5661.365770795" watchObservedRunningTime="2025-10-06 23:05:16.073846906 +0000 UTC m=+5661.366883640" Oct 06 23:05:17 crc kubenswrapper[5014]: I1006 23:05:17.626259 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:17 crc kubenswrapper[5014]: I1006 23:05:17.626834 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:17 crc kubenswrapper[5014]: I1006 23:05:17.676171 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:18 crc kubenswrapper[5014]: I1006 23:05:18.115521 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:18 crc kubenswrapper[5014]: I1006 23:05:18.169948 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bhbtc"] Oct 06 23:05:18 crc kubenswrapper[5014]: I1006 23:05:18.372410 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-w49rt/crc-debug-fz6fq"] Oct 06 23:05:18 crc kubenswrapper[5014]: I1006 23:05:18.374181 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w49rt/crc-debug-fz6fq" Oct 06 23:05:18 crc kubenswrapper[5014]: I1006 23:05:18.540665 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2f5be03b-0a81-489e-bc12-93898b726f55-host\") pod \"crc-debug-fz6fq\" (UID: \"2f5be03b-0a81-489e-bc12-93898b726f55\") " pod="openshift-must-gather-w49rt/crc-debug-fz6fq" Oct 06 23:05:18 crc kubenswrapper[5014]: I1006 23:05:18.540710 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8jhr\" (UniqueName: \"kubernetes.io/projected/2f5be03b-0a81-489e-bc12-93898b726f55-kube-api-access-j8jhr\") pod \"crc-debug-fz6fq\" (UID: \"2f5be03b-0a81-489e-bc12-93898b726f55\") " pod="openshift-must-gather-w49rt/crc-debug-fz6fq" Oct 06 23:05:18 crc kubenswrapper[5014]: I1006 23:05:18.642137 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2f5be03b-0a81-489e-bc12-93898b726f55-host\") pod \"crc-debug-fz6fq\" (UID: \"2f5be03b-0a81-489e-bc12-93898b726f55\") " pod="openshift-must-gather-w49rt/crc-debug-fz6fq" Oct 06 23:05:18 crc kubenswrapper[5014]: I1006 23:05:18.642194 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8jhr\" (UniqueName: \"kubernetes.io/projected/2f5be03b-0a81-489e-bc12-93898b726f55-kube-api-access-j8jhr\") pod \"crc-debug-fz6fq\" (UID: \"2f5be03b-0a81-489e-bc12-93898b726f55\") " pod="openshift-must-gather-w49rt/crc-debug-fz6fq" Oct 06 23:05:18 crc kubenswrapper[5014]: I1006 23:05:18.642331 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2f5be03b-0a81-489e-bc12-93898b726f55-host\") pod \"crc-debug-fz6fq\" (UID: \"2f5be03b-0a81-489e-bc12-93898b726f55\") " pod="openshift-must-gather-w49rt/crc-debug-fz6fq" Oct 06 23:05:18 crc kubenswrapper[5014]: I1006 23:05:18.675613 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8jhr\" (UniqueName: \"kubernetes.io/projected/2f5be03b-0a81-489e-bc12-93898b726f55-kube-api-access-j8jhr\") pod \"crc-debug-fz6fq\" (UID: \"2f5be03b-0a81-489e-bc12-93898b726f55\") " pod="openshift-must-gather-w49rt/crc-debug-fz6fq" Oct 06 23:05:18 crc kubenswrapper[5014]: I1006 23:05:18.691631 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-w49rt/crc-debug-fz6fq" Oct 06 23:05:18 crc kubenswrapper[5014]: W1006 23:05:18.718688 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2f5be03b_0a81_489e_bc12_93898b726f55.slice/crio-ad38b86d54089687436dd0fbaa88e012db879e205906fe8e36050d25773822ee WatchSource:0}: Error finding container ad38b86d54089687436dd0fbaa88e012db879e205906fe8e36050d25773822ee: Status 404 returned error can't find the container with id ad38b86d54089687436dd0fbaa88e012db879e205906fe8e36050d25773822ee Oct 06 23:05:19 crc kubenswrapper[5014]: I1006 23:05:19.071605 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w49rt/crc-debug-fz6fq" event={"ID":"2f5be03b-0a81-489e-bc12-93898b726f55","Type":"ContainerStarted","Data":"ad38b86d54089687436dd0fbaa88e012db879e205906fe8e36050d25773822ee"} Oct 06 23:05:20 crc kubenswrapper[5014]: I1006 23:05:20.077883 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bhbtc" podUID="d0fc5d59-3772-4f0d-941e-711604760b3f" containerName="registry-server" containerID="cri-o://dcce5cd2605ff9cac2e52e442ccb3271433aed11b96a5b38be42de529edc359c" gracePeriod=2 Oct 06 23:05:20 crc kubenswrapper[5014]: I1006 23:05:20.926713 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.082788 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpkm7\" (UniqueName: \"kubernetes.io/projected/d0fc5d59-3772-4f0d-941e-711604760b3f-kube-api-access-mpkm7\") pod \"d0fc5d59-3772-4f0d-941e-711604760b3f\" (UID: \"d0fc5d59-3772-4f0d-941e-711604760b3f\") " Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.082829 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0fc5d59-3772-4f0d-941e-711604760b3f-utilities\") pod \"d0fc5d59-3772-4f0d-941e-711604760b3f\" (UID: \"d0fc5d59-3772-4f0d-941e-711604760b3f\") " Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.082956 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0fc5d59-3772-4f0d-941e-711604760b3f-catalog-content\") pod \"d0fc5d59-3772-4f0d-941e-711604760b3f\" (UID: \"d0fc5d59-3772-4f0d-941e-711604760b3f\") " Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.085119 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0fc5d59-3772-4f0d-941e-711604760b3f-utilities" (OuterVolumeSpecName: "utilities") pod "d0fc5d59-3772-4f0d-941e-711604760b3f" (UID: "d0fc5d59-3772-4f0d-941e-711604760b3f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.096534 5014 generic.go:334] "Generic (PLEG): container finished" podID="d0fc5d59-3772-4f0d-941e-711604760b3f" containerID="dcce5cd2605ff9cac2e52e442ccb3271433aed11b96a5b38be42de529edc359c" exitCode=0 Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.096583 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bhbtc" event={"ID":"d0fc5d59-3772-4f0d-941e-711604760b3f","Type":"ContainerDied","Data":"dcce5cd2605ff9cac2e52e442ccb3271433aed11b96a5b38be42de529edc359c"} Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.096611 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bhbtc" event={"ID":"d0fc5d59-3772-4f0d-941e-711604760b3f","Type":"ContainerDied","Data":"416cf0159a1bd79cb27362d1c6f9ba436d6d03df57bd9c3a23b2994f636fa3dd"} Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.096643 5014 scope.go:117] "RemoveContainer" containerID="dcce5cd2605ff9cac2e52e442ccb3271433aed11b96a5b38be42de529edc359c" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.096805 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bhbtc" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.098800 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0fc5d59-3772-4f0d-941e-711604760b3f-kube-api-access-mpkm7" (OuterVolumeSpecName: "kube-api-access-mpkm7") pod "d0fc5d59-3772-4f0d-941e-711604760b3f" (UID: "d0fc5d59-3772-4f0d-941e-711604760b3f"). InnerVolumeSpecName "kube-api-access-mpkm7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.135543 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0fc5d59-3772-4f0d-941e-711604760b3f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d0fc5d59-3772-4f0d-941e-711604760b3f" (UID: "d0fc5d59-3772-4f0d-941e-711604760b3f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.145159 5014 scope.go:117] "RemoveContainer" containerID="3689b2c597bddb9bcf79d9b6efcbe012b095af713d0cf96e9c9fbceacb211f8b" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.171609 5014 scope.go:117] "RemoveContainer" containerID="c8b03b076b502f7737cfb060424a1de39b6bfb54e2baf2bd5419bfebd6b89fd1" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.185138 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpkm7\" (UniqueName: \"kubernetes.io/projected/d0fc5d59-3772-4f0d-941e-711604760b3f-kube-api-access-mpkm7\") on node \"crc\" DevicePath \"\"" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.185171 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0fc5d59-3772-4f0d-941e-711604760b3f-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.185181 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0fc5d59-3772-4f0d-941e-711604760b3f-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.217012 5014 scope.go:117] "RemoveContainer" containerID="dcce5cd2605ff9cac2e52e442ccb3271433aed11b96a5b38be42de529edc359c" Oct 06 23:05:21 crc kubenswrapper[5014]: E1006 23:05:21.217719 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcce5cd2605ff9cac2e52e442ccb3271433aed11b96a5b38be42de529edc359c\": container with ID starting with dcce5cd2605ff9cac2e52e442ccb3271433aed11b96a5b38be42de529edc359c not found: ID does not exist" containerID="dcce5cd2605ff9cac2e52e442ccb3271433aed11b96a5b38be42de529edc359c" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.217777 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcce5cd2605ff9cac2e52e442ccb3271433aed11b96a5b38be42de529edc359c"} err="failed to get container status \"dcce5cd2605ff9cac2e52e442ccb3271433aed11b96a5b38be42de529edc359c\": rpc error: code = NotFound desc = could not find container \"dcce5cd2605ff9cac2e52e442ccb3271433aed11b96a5b38be42de529edc359c\": container with ID starting with dcce5cd2605ff9cac2e52e442ccb3271433aed11b96a5b38be42de529edc359c not found: ID does not exist" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.217805 5014 scope.go:117] "RemoveContainer" containerID="3689b2c597bddb9bcf79d9b6efcbe012b095af713d0cf96e9c9fbceacb211f8b" Oct 06 23:05:21 crc kubenswrapper[5014]: E1006 23:05:21.218135 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3689b2c597bddb9bcf79d9b6efcbe012b095af713d0cf96e9c9fbceacb211f8b\": container with ID starting with 3689b2c597bddb9bcf79d9b6efcbe012b095af713d0cf96e9c9fbceacb211f8b not found: ID does not exist" containerID="3689b2c597bddb9bcf79d9b6efcbe012b095af713d0cf96e9c9fbceacb211f8b" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.218167 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3689b2c597bddb9bcf79d9b6efcbe012b095af713d0cf96e9c9fbceacb211f8b"} err="failed to get container status \"3689b2c597bddb9bcf79d9b6efcbe012b095af713d0cf96e9c9fbceacb211f8b\": rpc error: code = NotFound desc = could not find container 
\"3689b2c597bddb9bcf79d9b6efcbe012b095af713d0cf96e9c9fbceacb211f8b\": container with ID starting with 3689b2c597bddb9bcf79d9b6efcbe012b095af713d0cf96e9c9fbceacb211f8b not found: ID does not exist" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.218187 5014 scope.go:117] "RemoveContainer" containerID="c8b03b076b502f7737cfb060424a1de39b6bfb54e2baf2bd5419bfebd6b89fd1" Oct 06 23:05:21 crc kubenswrapper[5014]: E1006 23:05:21.218506 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8b03b076b502f7737cfb060424a1de39b6bfb54e2baf2bd5419bfebd6b89fd1\": container with ID starting with c8b03b076b502f7737cfb060424a1de39b6bfb54e2baf2bd5419bfebd6b89fd1 not found: ID does not exist" containerID="c8b03b076b502f7737cfb060424a1de39b6bfb54e2baf2bd5419bfebd6b89fd1" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.218550 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8b03b076b502f7737cfb060424a1de39b6bfb54e2baf2bd5419bfebd6b89fd1"} err="failed to get container status \"c8b03b076b502f7737cfb060424a1de39b6bfb54e2baf2bd5419bfebd6b89fd1\": rpc error: code = NotFound desc = could not find container \"c8b03b076b502f7737cfb060424a1de39b6bfb54e2baf2bd5419bfebd6b89fd1\": container with ID starting with c8b03b076b502f7737cfb060424a1de39b6bfb54e2baf2bd5419bfebd6b89fd1 not found: ID does not exist" Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.427286 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bhbtc"] Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.435825 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bhbtc"] Oct 06 23:05:21 crc kubenswrapper[5014]: I1006 23:05:21.499090 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0fc5d59-3772-4f0d-941e-711604760b3f" path="/var/lib/kubelet/pods/d0fc5d59-3772-4f0d-941e-711604760b3f/volumes" Oct 06 23:05:30 crc kubenswrapper[5014]: I1006 23:05:30.167791 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w49rt/crc-debug-fz6fq" event={"ID":"2f5be03b-0a81-489e-bc12-93898b726f55","Type":"ContainerStarted","Data":"2e089cfd2603d3f672be58d54b59a18b573100e841847416a340e470ac7501b3"} Oct 06 23:05:30 crc kubenswrapper[5014]: I1006 23:05:30.183122 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-w49rt/crc-debug-fz6fq" podStartSLOduration=1.886104879 podStartE2EDuration="12.183103631s" podCreationTimestamp="2025-10-06 23:05:18 +0000 UTC" firstStartedPulling="2025-10-06 23:05:18.721146419 +0000 UTC m=+5664.014183153" lastFinishedPulling="2025-10-06 23:05:29.018145171 +0000 UTC m=+5674.311181905" observedRunningTime="2025-10-06 23:05:30.182600956 +0000 UTC m=+5675.475637690" watchObservedRunningTime="2025-10-06 23:05:30.183103631 +0000 UTC m=+5675.476140365" Oct 06 23:05:51 crc kubenswrapper[5014]: I1006 23:05:51.735078 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 23:05:51 crc kubenswrapper[5014]: I1006 23:05:51.735840 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" 
podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.674762 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fpqt7"] Oct 06 23:05:59 crc kubenswrapper[5014]: E1006 23:05:59.675865 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0fc5d59-3772-4f0d-941e-711604760b3f" containerName="extract-content" Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.675886 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0fc5d59-3772-4f0d-941e-711604760b3f" containerName="extract-content" Oct 06 23:05:59 crc kubenswrapper[5014]: E1006 23:05:59.675910 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0fc5d59-3772-4f0d-941e-711604760b3f" containerName="registry-server" Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.675917 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0fc5d59-3772-4f0d-941e-711604760b3f" containerName="registry-server" Oct 06 23:05:59 crc kubenswrapper[5014]: E1006 23:05:59.675935 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0fc5d59-3772-4f0d-941e-711604760b3f" containerName="extract-utilities" Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.675941 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0fc5d59-3772-4f0d-941e-711604760b3f" containerName="extract-utilities" Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.676132 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0fc5d59-3772-4f0d-941e-711604760b3f" containerName="registry-server" Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.677592 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.682110 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fpqt7"]
Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.786289 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61a2a08e-e941-493d-a4d5-c245d4b34d2e-catalog-content\") pod \"redhat-marketplace-fpqt7\" (UID: \"61a2a08e-e941-493d-a4d5-c245d4b34d2e\") " pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.786393 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61a2a08e-e941-493d-a4d5-c245d4b34d2e-utilities\") pod \"redhat-marketplace-fpqt7\" (UID: \"61a2a08e-e941-493d-a4d5-c245d4b34d2e\") " pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.786463 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ss6vf\" (UniqueName: \"kubernetes.io/projected/61a2a08e-e941-493d-a4d5-c245d4b34d2e-kube-api-access-ss6vf\") pod \"redhat-marketplace-fpqt7\" (UID: \"61a2a08e-e941-493d-a4d5-c245d4b34d2e\") " pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.887779 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ss6vf\" (UniqueName: \"kubernetes.io/projected/61a2a08e-e941-493d-a4d5-c245d4b34d2e-kube-api-access-ss6vf\") pod \"redhat-marketplace-fpqt7\" (UID: \"61a2a08e-e941-493d-a4d5-c245d4b34d2e\") " pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.887873 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61a2a08e-e941-493d-a4d5-c245d4b34d2e-catalog-content\") pod \"redhat-marketplace-fpqt7\" (UID: \"61a2a08e-e941-493d-a4d5-c245d4b34d2e\") " pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.887940 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61a2a08e-e941-493d-a4d5-c245d4b34d2e-utilities\") pod \"redhat-marketplace-fpqt7\" (UID: \"61a2a08e-e941-493d-a4d5-c245d4b34d2e\") " pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.888399 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61a2a08e-e941-493d-a4d5-c245d4b34d2e-utilities\") pod \"redhat-marketplace-fpqt7\" (UID: \"61a2a08e-e941-493d-a4d5-c245d4b34d2e\") " pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.888641 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61a2a08e-e941-493d-a4d5-c245d4b34d2e-catalog-content\") pod \"redhat-marketplace-fpqt7\" (UID: \"61a2a08e-e941-493d-a4d5-c245d4b34d2e\") " pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:05:59 crc kubenswrapper[5014]: I1006 23:05:59.915643 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ss6vf\" (UniqueName: \"kubernetes.io/projected/61a2a08e-e941-493d-a4d5-c245d4b34d2e-kube-api-access-ss6vf\") pod \"redhat-marketplace-fpqt7\" (UID: \"61a2a08e-e941-493d-a4d5-c245d4b34d2e\") " pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:06:00 crc kubenswrapper[5014]: I1006 23:06:00.005693 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:06:00 crc kubenswrapper[5014]: I1006 23:06:00.552052 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fpqt7"]
Oct 06 23:06:00 crc kubenswrapper[5014]: W1006 23:06:00.579021 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod61a2a08e_e941_493d_a4d5_c245d4b34d2e.slice/crio-265e904df04bcd99c936b401dbc33ba5d0a4fb125b26bb9e4f44c12b8745b84a WatchSource:0}: Error finding container 265e904df04bcd99c936b401dbc33ba5d0a4fb125b26bb9e4f44c12b8745b84a: Status 404 returned error can't find the container with id 265e904df04bcd99c936b401dbc33ba5d0a4fb125b26bb9e4f44c12b8745b84a
Oct 06 23:06:01 crc kubenswrapper[5014]: I1006 23:06:01.477779 5014 generic.go:334] "Generic (PLEG): container finished" podID="61a2a08e-e941-493d-a4d5-c245d4b34d2e" containerID="0238d740fcd5f7d14d75201f03703fdf0a1d0689bac5d4d8641dd5045bcbd14a" exitCode=0
Oct 06 23:06:01 crc kubenswrapper[5014]: I1006 23:06:01.477880 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpqt7" event={"ID":"61a2a08e-e941-493d-a4d5-c245d4b34d2e","Type":"ContainerDied","Data":"0238d740fcd5f7d14d75201f03703fdf0a1d0689bac5d4d8641dd5045bcbd14a"}
Oct 06 23:06:01 crc kubenswrapper[5014]: I1006 23:06:01.478146 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpqt7" event={"ID":"61a2a08e-e941-493d-a4d5-c245d4b34d2e","Type":"ContainerStarted","Data":"265e904df04bcd99c936b401dbc33ba5d0a4fb125b26bb9e4f44c12b8745b84a"}
Oct 06 23:06:02 crc kubenswrapper[5014]: I1006 23:06:02.492764 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpqt7" event={"ID":"61a2a08e-e941-493d-a4d5-c245d4b34d2e","Type":"ContainerStarted","Data":"f63538c2d8cf8a9dc10b7e78a9249c32677f57d97e5cb977639b4687bd95e5cd"}
Oct 06 23:06:03 crc kubenswrapper[5014]: I1006 23:06:03.505908 5014 generic.go:334] "Generic (PLEG): container finished" podID="61a2a08e-e941-493d-a4d5-c245d4b34d2e" containerID="f63538c2d8cf8a9dc10b7e78a9249c32677f57d97e5cb977639b4687bd95e5cd" exitCode=0
Oct 06 23:06:03 crc kubenswrapper[5014]: I1006 23:06:03.505972 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpqt7" event={"ID":"61a2a08e-e941-493d-a4d5-c245d4b34d2e","Type":"ContainerDied","Data":"f63538c2d8cf8a9dc10b7e78a9249c32677f57d97e5cb977639b4687bd95e5cd"}
Oct 06 23:06:04 crc kubenswrapper[5014]: I1006 23:06:04.099900 5014 scope.go:117] "RemoveContainer" containerID="381353fb34afa6363549c3202bfb671473be4e37b2d2260cdf6c01ffbf096552"
Oct 06 23:06:04 crc kubenswrapper[5014]: I1006 23:06:04.517447 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpqt7" event={"ID":"61a2a08e-e941-493d-a4d5-c245d4b34d2e","Type":"ContainerStarted","Data":"1abb55bf37ad7e7771be7802f9458d931cf7eb960ec38d8fb5fbc7d9c72de3fd"}
Oct 06 23:06:04 crc kubenswrapper[5014]: I1006 23:06:04.541418 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fpqt7" podStartSLOduration=2.749429288 podStartE2EDuration="5.541401767s" podCreationTimestamp="2025-10-06 23:05:59 +0000 UTC" firstStartedPulling="2025-10-06 23:06:01.4834839 +0000 UTC m=+5706.776520634" lastFinishedPulling="2025-10-06 23:06:04.275456379 +0000 UTC m=+5709.568493113" observedRunningTime="2025-10-06 23:06:04.538500217 +0000 UTC m=+5709.831536951" watchObservedRunningTime="2025-10-06 23:06:04.541401767 +0000 UTC m=+5709.834438501"
Oct 06 23:06:05 crc kubenswrapper[5014]: I1006 23:06:05.507731 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-41d5-account-create-gfwn5_7f71f2dc-8f61-41db-81bb-929ab9e4676e/mariadb-account-create/0.log"
Oct 06 23:06:05 crc kubenswrapper[5014]: I1006 23:06:05.664986 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6c879f4fd6-dj9kj_f0b1051a-545a-4bcb-940e-962ee142eda6/barbican-api/0.log"
Oct 06 23:06:05 crc kubenswrapper[5014]: I1006 23:06:05.707006 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6c879f4fd6-dj9kj_f0b1051a-545a-4bcb-940e-962ee142eda6/barbican-api-log/0.log"
Oct 06 23:06:05 crc kubenswrapper[5014]: I1006 23:06:05.879027 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-db-create-ftfn9_af485524-a88f-45e0-ac5a-73a702c8ab82/mariadb-database-create/0.log"
Oct 06 23:06:06 crc kubenswrapper[5014]: I1006 23:06:06.079512 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-db-sync-sg4w2_0407dde2-0230-4190-bf9b-75d194fbf559/barbican-db-sync/0.log"
Oct 06 23:06:06 crc kubenswrapper[5014]: I1006 23:06:06.148929 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-7bddb697b-t7597_d9940507-c903-4972-8914-2727614b0e7c/barbican-keystone-listener/0.log"
Oct 06 23:06:06 crc kubenswrapper[5014]: I1006 23:06:06.324401 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-7bddb697b-t7597_d9940507-c903-4972-8914-2727614b0e7c/barbican-keystone-listener-log/0.log"
Oct 06 23:06:06 crc kubenswrapper[5014]: I1006 23:06:06.368995 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-79d6c6fc67-tshsj_e90d3a14-2789-4684-998e-c392eddebbb6/barbican-worker/0.log"
Oct 06 23:06:06 crc kubenswrapper[5014]: I1006 23:06:06.551102 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-79d6c6fc67-tshsj_e90d3a14-2789-4684-998e-c392eddebbb6/barbican-worker-log/0.log"
Oct 06 23:06:06 crc kubenswrapper[5014]: I1006 23:06:06.648036 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-fbc86f6b9-m59jq_d04f43e6-ed68-4b30-af11-fc47b9187b32/init/0.log"
Oct 06 23:06:06 crc kubenswrapper[5014]: I1006 23:06:06.771259 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-fbc86f6b9-m59jq_d04f43e6-ed68-4b30-af11-fc47b9187b32/init/0.log"
Oct 06 23:06:06 crc kubenswrapper[5014]: I1006 23:06:06.878457 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-fbc86f6b9-m59jq_d04f43e6-ed68-4b30-af11-fc47b9187b32/dnsmasq-dns/0.log"
Oct 06 23:06:06 crc kubenswrapper[5014]: I1006 23:06:06.992432 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-5c4c59f684-p8fnf_05be9aa1-8c19-404f-9769-c0bf03794d09/keystone-api/0.log"
Oct 06 23:06:07 crc kubenswrapper[5014]: I1006 23:06:07.127336 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-6bfb-account-create-wptvp_592b407e-657b-4204-af83-d4fe4508ee7e/mariadb-account-create/0.log"
Oct 06 23:06:07 crc kubenswrapper[5014]: I1006 23:06:07.344321 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-bootstrap-cjxq2_1cf816d0-0a38-4417-91a3-be05870aa60d/keystone-bootstrap/0.log"
Oct 06 23:06:07 crc kubenswrapper[5014]: I1006 23:06:07.493659 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29329861-59gbv_5ae5cdba-2926-4eb6-ba70-1c6b65be53f6/keystone-cron/0.log"
Oct 06 23:06:07 crc kubenswrapper[5014]: I1006 23:06:07.683232 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-db-create-n2tx5_ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4/mariadb-database-create/0.log"
Oct 06 23:06:07 crc kubenswrapper[5014]: I1006 23:06:07.831813 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-db-sync-j9dm7_c3c29595-ff94-4ee2-a182-29445fe5d2ad/keystone-db-sync/0.log"
Oct 06 23:06:07 crc kubenswrapper[5014]: I1006 23:06:07.959802 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-copy-data_36650df3-b498-4a78-8d62-ade74cd09dc3/adoption/0.log"
Oct 06 23:06:08 crc kubenswrapper[5014]: I1006 23:06:08.487644 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-0178-account-create-9r9g9_d7abf808-7de9-4072-a359-0b96605a8b33/mariadb-account-create/0.log"
Oct 06 23:06:08 crc kubenswrapper[5014]: I1006 23:06:08.709651 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5c84bf6665-5lrrl_e21fe075-9b63-41c8-a283-7659139c4ecd/neutron-httpd/0.log"
Oct 06 23:06:08 crc kubenswrapper[5014]: I1006 23:06:08.740027 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5c84bf6665-5lrrl_e21fe075-9b63-41c8-a283-7659139c4ecd/neutron-api/0.log"
Oct 06 23:06:08 crc kubenswrapper[5014]: I1006 23:06:08.902168 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-db-create-t6vdl_b1c8aaf9-b44d-41da-9beb-ffaf1d68b938/mariadb-database-create/0.log"
Oct 06 23:06:09 crc kubenswrapper[5014]: I1006 23:06:09.148328 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-db-sync-c6856_764bd6b3-4473-4221-8d5c-9ed898cb677b/neutron-db-sync/0.log"
Oct 06 23:06:09 crc kubenswrapper[5014]: I1006 23:06:09.178541 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_5d2f95cf-210c-459c-a307-ce2397afb314/mysql-bootstrap/0.log"
Oct 06 23:06:09 crc kubenswrapper[5014]: I1006 23:06:09.433403 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_5d2f95cf-210c-459c-a307-ce2397afb314/galera/0.log"
Oct 06 23:06:09 crc kubenswrapper[5014]: I1006 23:06:09.453647 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_5d2f95cf-210c-459c-a307-ce2397afb314/mysql-bootstrap/0.log"
Oct 06 23:06:09 crc kubenswrapper[5014]: I1006 23:06:09.713922 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_50d8273d-66b9-40f1-8d29-23052463812d/mysql-bootstrap/0.log"
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.009888 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.009930 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.100679 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.126636 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_50d8273d-66b9-40f1-8d29-23052463812d/mysql-bootstrap/0.log"
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.139159 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_50d8273d-66b9-40f1-8d29-23052463812d/galera/0.log"
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.207836 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_8557459e-5fc3-4d04-8827-8e8924429c15/memcached/0.log"
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.319107 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_af37df4c-adee-4877-83cb-35b343b6c8e7/openstackclient/0.log"
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.418326 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-copy-data_598e320b-1fec-436c-92fa-2dcc28318950/adoption/0.log"
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.557762 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_7f255b6d-1bad-47db-8049-e388bcfbf98c/openstack-network-exporter/0.log"
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.575352 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_7f255b6d-1bad-47db-8049-e388bcfbf98c/ovn-northd/0.log"
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.658508 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.710318 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fpqt7"]
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.724313 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_3399bbdd-5294-48b8-a026-a0ebbd3eddd1/openstack-network-exporter/0.log"
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.769118 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_3399bbdd-5294-48b8-a026-a0ebbd3eddd1/ovsdbserver-nb/0.log"
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.894668 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_02489223-b511-40b5-8eaa-0aeee96a0e68/openstack-network-exporter/0.log"
Oct 06 23:06:10 crc kubenswrapper[5014]: I1006 23:06:10.962433 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_02489223-b511-40b5-8eaa-0aeee96a0e68/ovsdbserver-nb/0.log"
Oct 06 23:06:11 crc kubenswrapper[5014]: I1006 23:06:11.068559 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_5728d872-705b-4439-965d-36634f78b9c3/openstack-network-exporter/0.log"
Oct 06 23:06:11 crc kubenswrapper[5014]: I1006 23:06:11.082239 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_5728d872-705b-4439-965d-36634f78b9c3/ovsdbserver-nb/0.log"
Oct 06 23:06:11 crc kubenswrapper[5014]: I1006 23:06:11.345101 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_9916618a-700d-406e-b443-38423eafb158/openstack-network-exporter/0.log"
Oct 06 23:06:11 crc kubenswrapper[5014]: I1006 23:06:11.424330 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_9916618a-700d-406e-b443-38423eafb158/ovsdbserver-sb/0.log"
Oct 06 23:06:11 crc kubenswrapper[5014]: I1006 23:06:11.549563 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_3d7105fa-62d5-469e-b4f3-774c6b1a5a90/openstack-network-exporter/0.log"
Oct 06 23:06:11 crc kubenswrapper[5014]: I1006 23:06:11.588197 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_3d7105fa-62d5-469e-b4f3-774c6b1a5a90/ovsdbserver-sb/0.log"
Oct 06 23:06:11 crc kubenswrapper[5014]: I1006 23:06:11.702759 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_44f14dca-2c88-421b-8f41-302926b341bb/openstack-network-exporter/0.log"
Oct 06 23:06:11 crc kubenswrapper[5014]: I1006 23:06:11.740437 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_44f14dca-2c88-421b-8f41-302926b341bb/ovsdbserver-sb/0.log"
Oct 06 23:06:11 crc kubenswrapper[5014]: I1006 23:06:11.855600 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_ece821f8-f5e4-4e03-8dd6-fab61ba9eca9/setup-container/0.log"
Oct 06 23:06:12 crc kubenswrapper[5014]: I1006 23:06:12.065403 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_ece821f8-f5e4-4e03-8dd6-fab61ba9eca9/setup-container/0.log"
Oct 06 23:06:12 crc kubenswrapper[5014]: I1006 23:06:12.110376 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_ece821f8-f5e4-4e03-8dd6-fab61ba9eca9/rabbitmq/0.log"
Oct 06 23:06:12 crc kubenswrapper[5014]: I1006 23:06:12.112751 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_aa637783-440d-4a1b-ada6-6bbf5be1bc84/setup-container/0.log"
Oct 06 23:06:12 crc kubenswrapper[5014]: I1006 23:06:12.255743 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_aa637783-440d-4a1b-ada6-6bbf5be1bc84/rabbitmq/0.log"
Oct 06 23:06:12 crc kubenswrapper[5014]: I1006 23:06:12.294250 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_aa637783-440d-4a1b-ada6-6bbf5be1bc84/setup-container/0.log"
Oct 06 23:06:12 crc kubenswrapper[5014]: I1006 23:06:12.596332 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fpqt7" podUID="61a2a08e-e941-493d-a4d5-c245d4b34d2e" containerName="registry-server" containerID="cri-o://1abb55bf37ad7e7771be7802f9458d931cf7eb960ec38d8fb5fbc7d9c72de3fd" gracePeriod=2
Oct 06 23:06:13 crc kubenswrapper[5014]: I1006 23:06:13.612473 5014 generic.go:334] "Generic (PLEG): container finished" podID="61a2a08e-e941-493d-a4d5-c245d4b34d2e" containerID="1abb55bf37ad7e7771be7802f9458d931cf7eb960ec38d8fb5fbc7d9c72de3fd" exitCode=0
Oct 06 23:06:13 crc kubenswrapper[5014]: I1006 23:06:13.612525 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpqt7" event={"ID":"61a2a08e-e941-493d-a4d5-c245d4b34d2e","Type":"ContainerDied","Data":"1abb55bf37ad7e7771be7802f9458d931cf7eb960ec38d8fb5fbc7d9c72de3fd"}
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.312088 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.360002 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61a2a08e-e941-493d-a4d5-c245d4b34d2e-utilities\") pod \"61a2a08e-e941-493d-a4d5-c245d4b34d2e\" (UID: \"61a2a08e-e941-493d-a4d5-c245d4b34d2e\") "
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.360168 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ss6vf\" (UniqueName: \"kubernetes.io/projected/61a2a08e-e941-493d-a4d5-c245d4b34d2e-kube-api-access-ss6vf\") pod \"61a2a08e-e941-493d-a4d5-c245d4b34d2e\" (UID: \"61a2a08e-e941-493d-a4d5-c245d4b34d2e\") "
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.360248 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61a2a08e-e941-493d-a4d5-c245d4b34d2e-catalog-content\") pod \"61a2a08e-e941-493d-a4d5-c245d4b34d2e\" (UID: \"61a2a08e-e941-493d-a4d5-c245d4b34d2e\") "
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.360976 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61a2a08e-e941-493d-a4d5-c245d4b34d2e-utilities" (OuterVolumeSpecName: "utilities") pod "61a2a08e-e941-493d-a4d5-c245d4b34d2e" (UID: "61a2a08e-e941-493d-a4d5-c245d4b34d2e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.372281 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61a2a08e-e941-493d-a4d5-c245d4b34d2e-kube-api-access-ss6vf" (OuterVolumeSpecName: "kube-api-access-ss6vf") pod "61a2a08e-e941-493d-a4d5-c245d4b34d2e" (UID: "61a2a08e-e941-493d-a4d5-c245d4b34d2e"). InnerVolumeSpecName "kube-api-access-ss6vf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.394190 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61a2a08e-e941-493d-a4d5-c245d4b34d2e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "61a2a08e-e941-493d-a4d5-c245d4b34d2e" (UID: "61a2a08e-e941-493d-a4d5-c245d4b34d2e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.462030 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ss6vf\" (UniqueName: \"kubernetes.io/projected/61a2a08e-e941-493d-a4d5-c245d4b34d2e-kube-api-access-ss6vf\") on node \"crc\" DevicePath \"\""
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.462059 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61a2a08e-e941-493d-a4d5-c245d4b34d2e-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.462068 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61a2a08e-e941-493d-a4d5-c245d4b34d2e-utilities\") on node \"crc\" DevicePath \"\""
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.622119 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpqt7" event={"ID":"61a2a08e-e941-493d-a4d5-c245d4b34d2e","Type":"ContainerDied","Data":"265e904df04bcd99c936b401dbc33ba5d0a4fb125b26bb9e4f44c12b8745b84a"}
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.622176 5014 scope.go:117] "RemoveContainer" containerID="1abb55bf37ad7e7771be7802f9458d931cf7eb960ec38d8fb5fbc7d9c72de3fd"
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.622209 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fpqt7"
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.651384 5014 scope.go:117] "RemoveContainer" containerID="f63538c2d8cf8a9dc10b7e78a9249c32677f57d97e5cb977639b4687bd95e5cd"
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.661432 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fpqt7"]
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.674239 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fpqt7"]
Oct 06 23:06:14 crc kubenswrapper[5014]: I1006 23:06:14.675825 5014 scope.go:117] "RemoveContainer" containerID="0238d740fcd5f7d14d75201f03703fdf0a1d0689bac5d4d8641dd5045bcbd14a"
Oct 06 23:06:15 crc kubenswrapper[5014]: I1006 23:06:15.496266 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61a2a08e-e941-493d-a4d5-c245d4b34d2e" path="/var/lib/kubelet/pods/61a2a08e-e941-493d-a4d5-c245d4b34d2e/volumes"
Oct 06 23:06:21 crc kubenswrapper[5014]: I1006 23:06:21.735718 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 23:06:21 crc kubenswrapper[5014]: I1006 23:06:21.736447 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 23:06:51 crc kubenswrapper[5014]: I1006 23:06:51.735661 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 06 23:06:51 crc kubenswrapper[5014]: I1006 23:06:51.736323 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 06 23:06:51 crc kubenswrapper[5014]: I1006 23:06:51.736391 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths"
Oct 06 23:06:51 crc kubenswrapper[5014]: I1006 23:06:51.737120 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b714fdc15c24e51123635f494a6a27ab187d08603b236a717c87633ed54de216"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 06 23:06:51 crc kubenswrapper[5014]: I1006 23:06:51.737168 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://b714fdc15c24e51123635f494a6a27ab187d08603b236a717c87633ed54de216" gracePeriod=600
Oct 06 23:06:52 crc kubenswrapper[5014]: I1006 23:06:52.024469 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="b714fdc15c24e51123635f494a6a27ab187d08603b236a717c87633ed54de216" exitCode=0
Oct 06 23:06:52 crc kubenswrapper[5014]: I1006 23:06:52.024719 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"b714fdc15c24e51123635f494a6a27ab187d08603b236a717c87633ed54de216"}
Oct 06 23:06:52 crc kubenswrapper[5014]: I1006 23:06:52.024954 5014 scope.go:117] "RemoveContainer" containerID="3dc89b233812ed3a78d08389b5420f2dd0eee375620d1ca250f6f8562d37b016"
Oct 06 23:06:53 crc kubenswrapper[5014]: I1006 23:06:53.039573 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerStarted","Data":"d31155bd7b4769b3eedfded0dec5a850227452b5b5684203cfea03e6550fc584"}
Oct 06 23:06:57 crc kubenswrapper[5014]: I1006 23:06:57.092805 5014 generic.go:334] "Generic (PLEG): container finished" podID="2f5be03b-0a81-489e-bc12-93898b726f55" containerID="2e089cfd2603d3f672be58d54b59a18b573100e841847416a340e470ac7501b3" exitCode=0
Oct 06 23:06:57 crc kubenswrapper[5014]: I1006 23:06:57.092879 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w49rt/crc-debug-fz6fq" event={"ID":"2f5be03b-0a81-489e-bc12-93898b726f55","Type":"ContainerDied","Data":"2e089cfd2603d3f672be58d54b59a18b573100e841847416a340e470ac7501b3"}
Oct 06 23:06:58 crc kubenswrapper[5014]: I1006 23:06:58.215176 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w49rt/crc-debug-fz6fq"
Oct 06 23:06:58 crc kubenswrapper[5014]: I1006 23:06:58.253137 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-w49rt/crc-debug-fz6fq"]
Oct 06 23:06:58 crc kubenswrapper[5014]: I1006 23:06:58.260747 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-w49rt/crc-debug-fz6fq"]
Oct 06 23:06:58 crc kubenswrapper[5014]: I1006 23:06:58.355721 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8jhr\" (UniqueName: \"kubernetes.io/projected/2f5be03b-0a81-489e-bc12-93898b726f55-kube-api-access-j8jhr\") pod \"2f5be03b-0a81-489e-bc12-93898b726f55\" (UID: \"2f5be03b-0a81-489e-bc12-93898b726f55\") "
Oct 06 23:06:58 crc kubenswrapper[5014]: I1006 23:06:58.355970 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2f5be03b-0a81-489e-bc12-93898b726f55-host\") pod \"2f5be03b-0a81-489e-bc12-93898b726f55\" (UID: \"2f5be03b-0a81-489e-bc12-93898b726f55\") "
Oct 06 23:06:58 crc kubenswrapper[5014]: I1006 23:06:58.356778 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2f5be03b-0a81-489e-bc12-93898b726f55-host" (OuterVolumeSpecName: "host") pod "2f5be03b-0a81-489e-bc12-93898b726f55" (UID: "2f5be03b-0a81-489e-bc12-93898b726f55"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 06 23:06:58 crc kubenswrapper[5014]: I1006 23:06:58.361897 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f5be03b-0a81-489e-bc12-93898b726f55-kube-api-access-j8jhr" (OuterVolumeSpecName: "kube-api-access-j8jhr") pod "2f5be03b-0a81-489e-bc12-93898b726f55" (UID: "2f5be03b-0a81-489e-bc12-93898b726f55"). InnerVolumeSpecName "kube-api-access-j8jhr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 23:06:58 crc kubenswrapper[5014]: I1006 23:06:58.459036 5014 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2f5be03b-0a81-489e-bc12-93898b726f55-host\") on node \"crc\" DevicePath \"\""
Oct 06 23:06:58 crc kubenswrapper[5014]: I1006 23:06:58.459082 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8jhr\" (UniqueName: \"kubernetes.io/projected/2f5be03b-0a81-489e-bc12-93898b726f55-kube-api-access-j8jhr\") on node \"crc\" DevicePath \"\""
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.119035 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad38b86d54089687436dd0fbaa88e012db879e205906fe8e36050d25773822ee"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.119131 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w49rt/crc-debug-fz6fq"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.439472 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-w49rt/crc-debug-lbzpr"]
Oct 06 23:06:59 crc kubenswrapper[5014]: E1006 23:06:59.440029 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61a2a08e-e941-493d-a4d5-c245d4b34d2e" containerName="extract-content"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.440054 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="61a2a08e-e941-493d-a4d5-c245d4b34d2e" containerName="extract-content"
Oct 06 23:06:59 crc kubenswrapper[5014]: E1006 23:06:59.440094 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f5be03b-0a81-489e-bc12-93898b726f55" containerName="container-00"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.440106 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f5be03b-0a81-489e-bc12-93898b726f55" containerName="container-00"
Oct 06 23:06:59 crc kubenswrapper[5014]: E1006 23:06:59.440129 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61a2a08e-e941-493d-a4d5-c245d4b34d2e" containerName="extract-utilities"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.440144 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="61a2a08e-e941-493d-a4d5-c245d4b34d2e" containerName="extract-utilities"
Oct 06 23:06:59 crc kubenswrapper[5014]: E1006 23:06:59.440170 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61a2a08e-e941-493d-a4d5-c245d4b34d2e" containerName="registry-server"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.440181 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="61a2a08e-e941-493d-a4d5-c245d4b34d2e" containerName="registry-server"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.440456 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="61a2a08e-e941-493d-a4d5-c245d4b34d2e" containerName="registry-server"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.440528 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f5be03b-0a81-489e-bc12-93898b726f55" containerName="container-00"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.441557 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w49rt/crc-debug-lbzpr"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.503889 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f5be03b-0a81-489e-bc12-93898b726f55" path="/var/lib/kubelet/pods/2f5be03b-0a81-489e-bc12-93898b726f55/volumes"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.580872 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7-host\") pod \"crc-debug-lbzpr\" (UID: \"e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7\") " pod="openshift-must-gather-w49rt/crc-debug-lbzpr"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.581164 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rw7fl\" (UniqueName: \"kubernetes.io/projected/e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7-kube-api-access-rw7fl\") pod \"crc-debug-lbzpr\" (UID: \"e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7\") " pod="openshift-must-gather-w49rt/crc-debug-lbzpr"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.682492 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rw7fl\" (UniqueName: \"kubernetes.io/projected/e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7-kube-api-access-rw7fl\") pod \"crc-debug-lbzpr\" (UID: \"e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7\") " pod="openshift-must-gather-w49rt/crc-debug-lbzpr"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.682687 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7-host\") pod \"crc-debug-lbzpr\" (UID: \"e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7\") " pod="openshift-must-gather-w49rt/crc-debug-lbzpr"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.682806 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7-host\") pod \"crc-debug-lbzpr\" (UID: \"e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7\") " pod="openshift-must-gather-w49rt/crc-debug-lbzpr"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.703153 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rw7fl\" (UniqueName: \"kubernetes.io/projected/e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7-kube-api-access-rw7fl\") pod \"crc-debug-lbzpr\" (UID: \"e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7\") " pod="openshift-must-gather-w49rt/crc-debug-lbzpr"
Oct 06 23:06:59 crc kubenswrapper[5014]: I1006 23:06:59.773172 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w49rt/crc-debug-lbzpr"
Oct 06 23:07:00 crc kubenswrapper[5014]: I1006 23:07:00.130095 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w49rt/crc-debug-lbzpr" event={"ID":"e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7","Type":"ContainerStarted","Data":"d68d0870f81551c9d93f286146bfb08ecf97877419a20168a15354e8ba792589"}
Oct 06 23:07:01 crc kubenswrapper[5014]: I1006 23:07:01.161709 5014 generic.go:334] "Generic (PLEG): container finished" podID="e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7" containerID="25472e863d37a8748f368fea44bd9375406d674a820f5f83fae18f2827670b68" exitCode=0
Oct 06 23:07:01 crc kubenswrapper[5014]: I1006 23:07:01.162009 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w49rt/crc-debug-lbzpr" event={"ID":"e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7","Type":"ContainerDied","Data":"25472e863d37a8748f368fea44bd9375406d674a820f5f83fae18f2827670b68"}
Oct 06 23:07:02 crc kubenswrapper[5014]: I1006 23:07:02.243096 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w49rt/crc-debug-lbzpr"
Oct 06 23:07:02 crc kubenswrapper[5014]: I1006 23:07:02.421811 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7-host\") pod \"e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7\" (UID: \"e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7\") "
Oct 06 23:07:02 crc kubenswrapper[5014]: I1006 23:07:02.421919 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7-host" (OuterVolumeSpecName: "host") pod "e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7" (UID: "e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 06 23:07:02 crc kubenswrapper[5014]: I1006 23:07:02.422017 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rw7fl\" (UniqueName: \"kubernetes.io/projected/e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7-kube-api-access-rw7fl\") pod \"e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7\" (UID: \"e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7\") "
Oct 06 23:07:02 crc kubenswrapper[5014]: I1006 23:07:02.422374 5014 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7-host\") on node \"crc\" DevicePath \"\""
Oct 06 23:07:02 crc kubenswrapper[5014]: I1006 23:07:02.427460 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7-kube-api-access-rw7fl" (OuterVolumeSpecName: "kube-api-access-rw7fl") pod "e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7" (UID: "e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7"). InnerVolumeSpecName "kube-api-access-rw7fl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 23:07:02 crc kubenswrapper[5014]: I1006 23:07:02.523516 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rw7fl\" (UniqueName: \"kubernetes.io/projected/e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7-kube-api-access-rw7fl\") on node \"crc\" DevicePath \"\""
Oct 06 23:07:03 crc kubenswrapper[5014]: I1006 23:07:03.178124 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w49rt/crc-debug-lbzpr" event={"ID":"e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7","Type":"ContainerDied","Data":"d68d0870f81551c9d93f286146bfb08ecf97877419a20168a15354e8ba792589"}
Oct 06 23:07:03 crc kubenswrapper[5014]: I1006 23:07:03.178169 5014 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d68d0870f81551c9d93f286146bfb08ecf97877419a20168a15354e8ba792589"
Oct 06 23:07:03 crc kubenswrapper[5014]: I1006 23:07:03.178223 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w49rt/crc-debug-lbzpr"
Oct 06 23:07:08 crc kubenswrapper[5014]: I1006 23:07:08.445218 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-w49rt/crc-debug-lbzpr"]
Oct 06 23:07:08 crc kubenswrapper[5014]: I1006 23:07:08.452835 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-w49rt/crc-debug-lbzpr"]
Oct 06 23:07:09 crc kubenswrapper[5014]: I1006 23:07:09.501212 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7" path="/var/lib/kubelet/pods/e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7/volumes"
Oct 06 23:07:09 crc kubenswrapper[5014]: I1006 23:07:09.644052 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-w49rt/crc-debug-9h5vn"]
Oct 06 23:07:09 crc kubenswrapper[5014]: E1006 23:07:09.644394 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7" containerName="container-00"
Oct 06 23:07:09 crc kubenswrapper[5014]: I1006 23:07:09.644406 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7" containerName="container-00"
Oct 06 23:07:09 crc kubenswrapper[5014]: I1006 23:07:09.644564 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3c099f2-1c2c-47a6-8a6f-99ad1c9388f7" containerName="container-00"
Oct 06 23:07:09 crc kubenswrapper[5014]: I1006 23:07:09.645119 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w49rt/crc-debug-9h5vn"
Oct 06 23:07:09 crc kubenswrapper[5014]: I1006 23:07:09.740118 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9359097f-ab2e-483f-a80e-2fe21fa38590-host\") pod \"crc-debug-9h5vn\" (UID: \"9359097f-ab2e-483f-a80e-2fe21fa38590\") " pod="openshift-must-gather-w49rt/crc-debug-9h5vn"
Oct 06 23:07:09 crc kubenswrapper[5014]: I1006 23:07:09.740818 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rswqj\" (UniqueName: \"kubernetes.io/projected/9359097f-ab2e-483f-a80e-2fe21fa38590-kube-api-access-rswqj\") pod \"crc-debug-9h5vn\" (UID: \"9359097f-ab2e-483f-a80e-2fe21fa38590\") " pod="openshift-must-gather-w49rt/crc-debug-9h5vn"
Oct 06 23:07:09 crc kubenswrapper[5014]: I1006 23:07:09.843407 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9359097f-ab2e-483f-a80e-2fe21fa38590-host\") pod \"crc-debug-9h5vn\" (UID: \"9359097f-ab2e-483f-a80e-2fe21fa38590\") " pod="openshift-must-gather-w49rt/crc-debug-9h5vn"
Oct 06 23:07:09 crc kubenswrapper[5014]: I1006 23:07:09.843727 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rswqj\" (UniqueName: \"kubernetes.io/projected/9359097f-ab2e-483f-a80e-2fe21fa38590-kube-api-access-rswqj\") pod \"crc-debug-9h5vn\" (UID: \"9359097f-ab2e-483f-a80e-2fe21fa38590\") " pod="openshift-must-gather-w49rt/crc-debug-9h5vn"
Oct 06 23:07:09 crc kubenswrapper[5014]: I1006 23:07:09.843972 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9359097f-ab2e-483f-a80e-2fe21fa38590-host\") pod \"crc-debug-9h5vn\" (UID: \"9359097f-ab2e-483f-a80e-2fe21fa38590\") " pod="openshift-must-gather-w49rt/crc-debug-9h5vn"
Oct 06 23:07:09 crc kubenswrapper[5014]: I1006 23:07:09.884354 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rswqj\" (UniqueName: \"kubernetes.io/projected/9359097f-ab2e-483f-a80e-2fe21fa38590-kube-api-access-rswqj\") pod \"crc-debug-9h5vn\" (UID: \"9359097f-ab2e-483f-a80e-2fe21fa38590\") " pod="openshift-must-gather-w49rt/crc-debug-9h5vn"
Oct 06 23:07:09 crc kubenswrapper[5014]: I1006 23:07:09.964125 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w49rt/crc-debug-9h5vn"
Oct 06 23:07:10 crc kubenswrapper[5014]: W1006 23:07:10.041615 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9359097f_ab2e_483f_a80e_2fe21fa38590.slice/crio-baeffeb88c3e96eeac20b9c3d3db235eaa9e88f642022e4389baab70f7e4d9cc WatchSource:0}: Error finding container baeffeb88c3e96eeac20b9c3d3db235eaa9e88f642022e4389baab70f7e4d9cc: Status 404 returned error can't find the container with id baeffeb88c3e96eeac20b9c3d3db235eaa9e88f642022e4389baab70f7e4d9cc
Oct 06 23:07:10 crc kubenswrapper[5014]: I1006 23:07:10.245475 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w49rt/crc-debug-9h5vn" event={"ID":"9359097f-ab2e-483f-a80e-2fe21fa38590","Type":"ContainerStarted","Data":"baeffeb88c3e96eeac20b9c3d3db235eaa9e88f642022e4389baab70f7e4d9cc"}
Oct 06 23:07:11 crc kubenswrapper[5014]: I1006 23:07:11.258334 5014 generic.go:334] "Generic (PLEG): container finished" podID="9359097f-ab2e-483f-a80e-2fe21fa38590" containerID="ed69470791fd224482c7c0f0a3888d732140e99e7385e37ed83cdc5d1ce5a8a4" exitCode=0
Oct 06 23:07:11 crc kubenswrapper[5014]: I1006 23:07:11.258437 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w49rt/crc-debug-9h5vn" event={"ID":"9359097f-ab2e-483f-a80e-2fe21fa38590","Type":"ContainerDied","Data":"ed69470791fd224482c7c0f0a3888d732140e99e7385e37ed83cdc5d1ce5a8a4"}
Oct 06 23:07:11 crc kubenswrapper[5014]: I1006 23:07:11.319877 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-w49rt/crc-debug-9h5vn"]
Oct 06 23:07:11 crc kubenswrapper[5014]: I1006 23:07:11.330425 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-w49rt/crc-debug-9h5vn"]
Oct 06 23:07:12 crc kubenswrapper[5014]: I1006 23:07:12.381076 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w49rt/crc-debug-9h5vn"
Oct 06 23:07:12 crc kubenswrapper[5014]: I1006 23:07:12.394746 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9359097f-ab2e-483f-a80e-2fe21fa38590-host\") pod \"9359097f-ab2e-483f-a80e-2fe21fa38590\" (UID: \"9359097f-ab2e-483f-a80e-2fe21fa38590\") "
Oct 06 23:07:12 crc kubenswrapper[5014]: I1006 23:07:12.394882 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9359097f-ab2e-483f-a80e-2fe21fa38590-host" (OuterVolumeSpecName: "host") pod "9359097f-ab2e-483f-a80e-2fe21fa38590" (UID: "9359097f-ab2e-483f-a80e-2fe21fa38590"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 06 23:07:12 crc kubenswrapper[5014]: I1006 23:07:12.395040 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rswqj\" (UniqueName: \"kubernetes.io/projected/9359097f-ab2e-483f-a80e-2fe21fa38590-kube-api-access-rswqj\") pod \"9359097f-ab2e-483f-a80e-2fe21fa38590\" (UID: \"9359097f-ab2e-483f-a80e-2fe21fa38590\") "
Oct 06 23:07:12 crc kubenswrapper[5014]: I1006 23:07:12.395372 5014 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9359097f-ab2e-483f-a80e-2fe21fa38590-host\") on node \"crc\" DevicePath \"\""
Oct 06 23:07:12 crc kubenswrapper[5014]: I1006 23:07:12.400846 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9359097f-ab2e-483f-a80e-2fe21fa38590-kube-api-access-rswqj" (OuterVolumeSpecName: "kube-api-access-rswqj") pod "9359097f-ab2e-483f-a80e-2fe21fa38590" (UID: "9359097f-ab2e-483f-a80e-2fe21fa38590"). InnerVolumeSpecName "kube-api-access-rswqj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 06 23:07:12 crc kubenswrapper[5014]: I1006 23:07:12.496420 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rswqj\" (UniqueName: \"kubernetes.io/projected/9359097f-ab2e-483f-a80e-2fe21fa38590-kube-api-access-rswqj\") on node \"crc\" DevicePath \"\""
Oct 06 23:07:12 crc kubenswrapper[5014]: I1006 23:07:12.951931 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr_2d7d7f9e-7639-4f0c-be5c-567d0120316a/util/0.log"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.102822 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr_2d7d7f9e-7639-4f0c-be5c-567d0120316a/pull/0.log"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.117030 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr_2d7d7f9e-7639-4f0c-be5c-567d0120316a/pull/0.log"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.143771 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr_2d7d7f9e-7639-4f0c-be5c-567d0120316a/util/0.log"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.264097 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr_2d7d7f9e-7639-4f0c-be5c-567d0120316a/extract/0.log"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.275457 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr_2d7d7f9e-7639-4f0c-be5c-567d0120316a/pull/0.log"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.276996 5014 scope.go:117] "RemoveContainer" containerID="ed69470791fd224482c7c0f0a3888d732140e99e7385e37ed83cdc5d1ce5a8a4"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.277034 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w49rt/crc-debug-9h5vn"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.331041 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_039f04e203c2c9b61aaff432bd38df0eac11b28bc152523f86f13214ebdc9rr_2d7d7f9e-7639-4f0c-be5c-567d0120316a/util/0.log"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.453546 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-64f56ff694-fbnjx_e03f73aa-fc56-435f-8f50-e55a813a4b0c/kube-rbac-proxy/0.log"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.493958 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9359097f-ab2e-483f-a80e-2fe21fa38590" path="/var/lib/kubelet/pods/9359097f-ab2e-483f-a80e-2fe21fa38590/volumes"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.515145 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-84bd8f6848-wpcz6_89c6e73c-a5c8-4909-85e5-48118abefc95/kube-rbac-proxy/0.log"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.559300 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-64f56ff694-fbnjx_e03f73aa-fc56-435f-8f50-e55a813a4b0c/manager/0.log"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.759038 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-84bd8f6848-wpcz6_89c6e73c-a5c8-4909-85e5-48118abefc95/manager/0.log"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.784973 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-58d86cd59d-8w8ws_6aea271b-eb7b-4e06-8bbb-65807a8027b6/kube-rbac-proxy/0.log"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.820151 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-58d86cd59d-8w8ws_6aea271b-eb7b-4e06-8bbb-65807a8027b6/manager/0.log"
Oct 06 23:07:13 crc kubenswrapper[5014]: I1006 23:07:13.914881 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-fd648f65-7qqn8_3e1db7a5-54f5-4e2c-9edc-3a5f7ce19d9e/kube-rbac-proxy/0.log"
Oct 06 23:07:14 crc kubenswrapper[5014]: I1006 23:07:14.084394 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-fd648f65-7qqn8_3e1db7a5-54f5-4e2c-9edc-3a5f7ce19d9e/manager/0.log"
Oct 06 23:07:14 crc kubenswrapper[5014]: I1006 23:07:14.110417 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-7ccfc8cf49-9k728_89fc8a32-1a12-4d35-b7fe-2e1fa829c4b7/manager/0.log"
Oct 06 23:07:14 crc kubenswrapper[5014]: I1006 23:07:14.124899 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-7ccfc8cf49-9k728_89fc8a32-1a12-4d35-b7fe-2e1fa829c4b7/kube-rbac-proxy/0.log"
Oct 06 23:07:14 crc kubenswrapper[5014]: I1006 23:07:14.245161 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5b477879bc-t5mpw_e134fc2c-d3a1-41d2-bf11-257d15bc68c8/kube-rbac-proxy/0.log"
Oct 06 23:07:14 crc kubenswrapper[5014]: I1006 23:07:14.282738 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5b477879bc-t5mpw_e134fc2c-d3a1-41d2-bf11-257d15bc68c8/manager/0.log"
Oct 06 23:07:14 crc kubenswrapper[5014]: I1006 23:07:14.375546 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-84788b6bc5-zbcqh_df9f59e9-4a23-4204-be0f-5f2729f419b1/kube-rbac-proxy/0.log"
Oct 06 23:07:14 crc kubenswrapper[5014]: I1006 23:07:14.456579 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5467f8988c-qzk8x_df7496c8-460c-4f74-83a1-bb1eefeac13c/kube-rbac-proxy/0.log"
Oct 06 23:07:14 crc kubenswrapper[5014]: I1006 23:07:14.533011 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5467f8988c-qzk8x_df7496c8-460c-4f74-83a1-bb1eefeac13c/manager/0.log"
Oct 06 23:07:14 crc kubenswrapper[5014]: I1006 23:07:14.628906 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-84788b6bc5-zbcqh_df9f59e9-4a23-4204-be0f-5f2729f419b1/manager/0.log"
Oct 06 23:07:14 crc kubenswrapper[5014]: I1006 23:07:14.688069 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-5b84cc7657-ml985_1afbb4e8-7c9b-4521-9496-d0df6b2003bb/kube-rbac-proxy/0.log"
Oct 06 23:07:14 crc kubenswrapper[5014]: I1006 23:07:14.794067 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-5b84cc7657-ml985_1afbb4e8-7c9b-4521-9496-d0df6b2003bb/manager/0.log"
Oct 06 23:07:14 crc kubenswrapper[5014]: I1006 23:07:14.848134 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-7cb48dbc-5b2mb_bade47f3-c50b-4f05-acfb-192e11c5b9e6/kube-rbac-proxy/0.log"
Oct 06 23:07:14 crc kubenswrapper[5014]: I1006 23:07:14.878742 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-7cb48dbc-5b2mb_bade47f3-c50b-4f05-acfb-192e11c5b9e6/manager/0.log"
Oct 06 23:07:14 crc kubenswrapper[5014]: I1006 23:07:14.975872 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-d6c9dc5bc-jngd6_031743a5-fa78-4f1e-aaba-2b53f308a1b3/kube-rbac-proxy/0.log"
Oct 06 23:07:15 crc kubenswrapper[5014]: I1006 23:07:15.036827 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-d6c9dc5bc-jngd6_031743a5-fa78-4f1e-aaba-2b53f308a1b3/manager/0.log"
Oct 06 23:07:15 crc kubenswrapper[5014]: I1006 23:07:15.181713 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-69b956fbf6-8fg8b_bea4afa0-51fb-4d29-84ba-807b4adc79cd/kube-rbac-proxy/0.log"
Oct 06 23:07:15 crc kubenswrapper[5014]: I1006 23:07:15.201940 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-69b956fbf6-8fg8b_bea4afa0-51fb-4d29-84ba-807b4adc79cd/manager/0.log"
Oct 06 23:07:15 crc kubenswrapper[5014]: I1006 23:07:15.240337 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-6c9b57c67-hqrxh_b5c83451-721d-4897-b4b9-996ee0d7ae94/kube-rbac-proxy/0.log"
Oct 06 23:07:15 crc kubenswrapper[5014]: I1006 23:07:15.412816 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-69f59f9d8-s6hxr_b46c946e-8936-434c-8a13-0670857929d4/kube-rbac-proxy/0.log"
Oct 06 23:07:15 crc kubenswrapper[5014]: I1006 23:07:15.440984 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-6c9b57c67-hqrxh_b5c83451-721d-4897-b4b9-996ee0d7ae94/manager/0.log"
Oct 06 23:07:15 crc kubenswrapper[5014]: I1006 23:07:15.451204 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-69f59f9d8-s6hxr_b46c946e-8936-434c-8a13-0670857929d4/manager/0.log"
Oct 06 23:07:15 crc kubenswrapper[5014]: I1006 23:07:15.606240 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd_c4de151c-737e-4ff2-8445-571ec2a5a8cd/kube-rbac-proxy/0.log"
Oct 06 23:07:15 crc kubenswrapper[5014]: I1006 23:07:15.637803 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-655ddf4d58n7mhd_c4de151c-737e-4ff2-8445-571ec2a5a8cd/manager/0.log"
Oct 06 23:07:15 crc kubenswrapper[5014]: I1006 23:07:15.758525 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-579544b768-thzrl_d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc/kube-rbac-proxy/0.log"
Oct 06 23:07:15 crc kubenswrapper[5014]: I1006 23:07:15.814343 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-69f5b7986c-tgvqq_b683d4c8-6991-42f2-bd7d-968080905450/kube-rbac-proxy/0.log"
Oct 06 23:07:16 crc kubenswrapper[5014]: I1006 23:07:16.187138 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-n4spq_b24a58b8-c8d7-4d2b-95ea-fa25ae191b21/registry-server/0.log"
Oct 06 23:07:16 crc kubenswrapper[5014]: I1006 23:07:16.216443 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-69f5b7986c-tgvqq_b683d4c8-6991-42f2-bd7d-968080905450/operator/0.log"
Oct 06 23:07:16 crc kubenswrapper[5014]: I1006 23:07:16.423774 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54d485fd9-mq79t_840c119a-0407-45bd-9588-795a38db80a8/kube-rbac-proxy/0.log"
Oct 06 23:07:16 crc kubenswrapper[5014]: I1006 23:07:16.442861 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54d485fd9-mq79t_840c119a-0407-45bd-9588-795a38db80a8/manager/0.log"
Oct 06 23:07:16 crc kubenswrapper[5014]: I1006 23:07:16.470957 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-66f6d6849b-kwbrs_41b602d9-b505-4656-8dfa-1443404db0c1/kube-rbac-proxy/0.log"
Oct 06 23:07:16 crc kubenswrapper[5014]: I1006 23:07:16.683077 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-66f6d6849b-kwbrs_41b602d9-b505-4656-8dfa-1443404db0c1/manager/0.log"
Oct 06 23:07:16 crc kubenswrapper[5014]: I1006 23:07:16.691142 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-zhnzr_c1d9fb64-549f-4b44-bed2-b49d474beb39/operator/0.log"
Oct 06 23:07:16 crc kubenswrapper[5014]: I1006 23:07:16.821362 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-579544b768-thzrl_d6f8bbbe-296a-4eb2-a1e7-90448f4be8bc/manager/0.log"
Oct 06 23:07:16 crc kubenswrapper[5014]: I1006 23:07:16.821656 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-76d5577b-w8ds6_b73339a9-4a16-41e5-8739-4822f0199db4/kube-rbac-proxy/0.log"
Oct 06 23:07:16 crc kubenswrapper[5014]: I1006 23:07:16.888481 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-76d5577b-w8ds6_b73339a9-4a16-41e5-8739-4822f0199db4/manager/0.log"
Oct 06 23:07:16 crc kubenswrapper[5014]: I1006 23:07:16.944720 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-f589c7597-scb7g_17d7e65a-c72c-4d26-81e3-8374778c5c3b/kube-rbac-proxy/0.log"
Oct 06 23:07:17 crc kubenswrapper[5014]: I1006 23:07:17.045102 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-f589c7597-scb7g_17d7e65a-c72c-4d26-81e3-8374778c5c3b/manager/0.log"
Oct 06 23:07:17 crc kubenswrapper[5014]: I1006 23:07:17.064342 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-6bb6dcddc-v5l5b_bd4146d3-3013-497c-85c1-37d5c7ea2e7c/manager/0.log"
Oct 06 23:07:17 crc kubenswrapper[5014]: I1006 23:07:17.107553 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-6bb6dcddc-v5l5b_bd4146d3-3013-497c-85c1-37d5c7ea2e7c/kube-rbac-proxy/0.log"
Oct 06 23:07:17 crc kubenswrapper[5014]: I1006 23:07:17.196498 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5d98cc5575-md7kb_6c35d36c-16ac-4ba1-9166-32c9a56ba6a0/kube-rbac-proxy/0.log"
Oct 06 23:07:17 crc kubenswrapper[5014]: I1006 23:07:17.215547 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5d98cc5575-md7kb_6c35d36c-16ac-4ba1-9166-32c9a56ba6a0/manager/0.log"
Oct 06 23:07:33 crc kubenswrapper[5014]: I1006 23:07:33.192731 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-77xrv_101e93eb-cfad-49df-95ce-b6b12664dd3a/control-plane-machine-set-operator/0.log"
Oct 06 23:07:33 crc kubenswrapper[5014]: I1006 23:07:33.367113 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-ffz9s_f7160d37-518f-49e6-aee8-ce14c3267c54/machine-api-operator/0.log"
Oct 06 23:07:33 crc kubenswrapper[5014]: I1006 23:07:33.367665 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-ffz9s_f7160d37-518f-49e6-aee8-ce14c3267c54/kube-rbac-proxy/0.log"
Oct 06 23:07:46 crc kubenswrapper[5014]: I1006 23:07:46.400277 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-7d4cc89fcb-ttqz7_8ddbd27d-bb5c-4d47-90bc-7215f30636aa/cert-manager-controller/0.log"
Oct 06 23:07:46 crc kubenswrapper[5014]: I1006 23:07:46.517000 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7d9f95dbf-kg5dg_c2d94b0d-307d-473d-b0dc-441e22f2c606/cert-manager-cainjector/0.log"
Oct 06 23:07:46 crc kubenswrapper[5014]: I1006 23:07:46.605815 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-d969966f-5vl2k_c4bf31a0-2c7c-4936-8f03-928208b80a04/cert-manager-webhook/0.log"
Oct 06 23:07:58 crc kubenswrapper[5014]: I1006 23:07:58.662797 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-6b874cbd85-8b8c4_054f0bcd-6bff-4bda-87f2-5863aa08d4f9/nmstate-console-plugin/0.log"
Oct 06 23:07:58 crc kubenswrapper[5014]: I1006 23:07:58.809608 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-fzdcf_886eb5f7-fd1f-4153-b290-a36f31fe58b8/nmstate-handler/0.log"
Oct 06 23:07:58 crc kubenswrapper[5014]: I1006 23:07:58.867696 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-fdff9cb8d-w5jdd_6c36947b-29b2-4c5e-888f-05d99e9a7ffd/kube-rbac-proxy/0.log"
Oct 06 23:07:58 crc kubenswrapper[5014]: I1006 23:07:58.890398 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-fdff9cb8d-w5jdd_6c36947b-29b2-4c5e-888f-05d99e9a7ffd/nmstate-metrics/0.log"
Oct 06 23:07:59 crc kubenswrapper[5014]: I1006 23:07:59.046916 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6cdbc54649-shqhf_e36e22ae-e89e-4303-a79c-7d590084348e/nmstate-webhook/0.log"
Oct 06 23:07:59 crc kubenswrapper[5014]: I1006 23:07:59.048802 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-858ddd8f98-fzvp8_e4d6ee06-3721-49c5-ab97-06291d64eb68/nmstate-operator/0.log"
Oct 06 23:08:12 crc kubenswrapper[5014]: I1006 23:08:12.947166 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-68d546b9d8-5tmn6_99f234c2-01a1-49fe-8979-2b8bfaa3f08b/kube-rbac-proxy/0.log"
Oct 06 23:08:13 crc kubenswrapper[5014]: I1006 23:08:13.220153 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/cp-frr-files/0.log"
Oct 06 23:08:13 crc kubenswrapper[5014]: I1006 23:08:13.342985 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/cp-frr-files/0.log"
Oct 06 23:08:13 crc kubenswrapper[5014]: I1006 23:08:13.398229 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-68d546b9d8-5tmn6_99f234c2-01a1-49fe-8979-2b8bfaa3f08b/controller/0.log"
Oct 06 23:08:13 crc kubenswrapper[5014]: I1006 23:08:13.416108 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/cp-reloader/0.log"
Oct 06 23:08:13 crc kubenswrapper[5014]: I1006 23:08:13.417605 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/cp-metrics/0.log"
Oct 06 23:08:13 crc kubenswrapper[5014]: I1006 23:08:13.519583 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/cp-reloader/0.log"
Oct 06 23:08:13 crc kubenswrapper[5014]: I1006 23:08:13.696448 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/cp-frr-files/0.log"
Oct 06 23:08:13 crc kubenswrapper[5014]: I1006 23:08:13.717064 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/cp-reloader/0.log" Oct
06 23:08:13 crc kubenswrapper[5014]: I1006 23:08:13.723023 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/cp-metrics/0.log" Oct 06 23:08:13 crc kubenswrapper[5014]: I1006 23:08:13.783529 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/cp-metrics/0.log" Oct 06 23:08:13 crc kubenswrapper[5014]: I1006 23:08:13.929751 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/cp-metrics/0.log" Oct 06 23:08:13 crc kubenswrapper[5014]: I1006 23:08:13.955194 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/cp-reloader/0.log" Oct 06 23:08:13 crc kubenswrapper[5014]: I1006 23:08:13.963967 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/controller/0.log" Oct 06 23:08:13 crc kubenswrapper[5014]: I1006 23:08:13.964746 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/cp-frr-files/0.log" Oct 06 23:08:14 crc kubenswrapper[5014]: I1006 23:08:14.129095 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/kube-rbac-proxy/0.log" Oct 06 23:08:14 crc kubenswrapper[5014]: I1006 23:08:14.158500 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/kube-rbac-proxy-frr/0.log" Oct 06 23:08:14 crc kubenswrapper[5014]: I1006 23:08:14.161112 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/frr-metrics/0.log" Oct 06 23:08:14 crc kubenswrapper[5014]: I1006 23:08:14.325467 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-64bf5d555-5xzfl_54c48252-fba7-4ffa-9c64-a013e59660e1/frr-k8s-webhook-server/0.log" Oct 06 23:08:14 crc kubenswrapper[5014]: I1006 23:08:14.388507 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/reloader/0.log" Oct 06 23:08:14 crc kubenswrapper[5014]: I1006 23:08:14.614984 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6d98768845-hrhnz_ad5a4e1b-2c12-4166-8be6-0ffce1256bf3/manager/0.log" Oct 06 23:08:14 crc kubenswrapper[5014]: I1006 23:08:14.803469 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-6d84c4b8d5-p9mhq_f8acd885-8a21-4e3e-acd9-acb8d65202d0/webhook-server/0.log" Oct 06 23:08:14 crc kubenswrapper[5014]: I1006 23:08:14.820896 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-blzvr_b8e735e8-cf23-4db7-bc61-b7e2d5b245af/kube-rbac-proxy/0.log" Oct 06 23:08:15 crc kubenswrapper[5014]: I1006 23:08:15.428494 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-blzvr_b8e735e8-cf23-4db7-bc61-b7e2d5b245af/speaker/0.log" Oct 06 23:08:15 crc kubenswrapper[5014]: I1006 23:08:15.797058 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-jvrkm_6268ffa5-522f-435a-b01d-dec42e54da42/frr/0.log" Oct 06 23:08:28 crc kubenswrapper[5014]: I1006 
23:08:28.053229 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8_5fc5c3ee-98c8-494d-9d3a-410a290a68b0/util/0.log" Oct 06 23:08:28 crc kubenswrapper[5014]: I1006 23:08:28.170344 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8_5fc5c3ee-98c8-494d-9d3a-410a290a68b0/util/0.log" Oct 06 23:08:28 crc kubenswrapper[5014]: I1006 23:08:28.247083 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8_5fc5c3ee-98c8-494d-9d3a-410a290a68b0/pull/0.log" Oct 06 23:08:28 crc kubenswrapper[5014]: I1006 23:08:28.254813 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8_5fc5c3ee-98c8-494d-9d3a-410a290a68b0/pull/0.log" Oct 06 23:08:28 crc kubenswrapper[5014]: I1006 23:08:28.389062 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8_5fc5c3ee-98c8-494d-9d3a-410a290a68b0/extract/0.log" Oct 06 23:08:28 crc kubenswrapper[5014]: I1006 23:08:28.425227 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8_5fc5c3ee-98c8-494d-9d3a-410a290a68b0/util/0.log" Oct 06 23:08:28 crc kubenswrapper[5014]: I1006 23:08:28.434338 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb6949pl8_5fc5c3ee-98c8-494d-9d3a-410a290a68b0/pull/0.log" Oct 06 23:08:28 crc kubenswrapper[5014]: I1006 23:08:28.562950 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l_7ce534c4-b596-45d1-9bf5-6573f8aa71e1/util/0.log" Oct 06 23:08:28 crc kubenswrapper[5014]: I1006 23:08:28.718939 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l_7ce534c4-b596-45d1-9bf5-6573f8aa71e1/pull/0.log" Oct 06 23:08:28 crc kubenswrapper[5014]: I1006 23:08:28.732500 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l_7ce534c4-b596-45d1-9bf5-6573f8aa71e1/pull/0.log" Oct 06 23:08:28 crc kubenswrapper[5014]: I1006 23:08:28.739048 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l_7ce534c4-b596-45d1-9bf5-6573f8aa71e1/util/0.log" Oct 06 23:08:28 crc kubenswrapper[5014]: I1006 23:08:28.883990 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l_7ce534c4-b596-45d1-9bf5-6573f8aa71e1/pull/0.log" Oct 06 23:08:28 crc kubenswrapper[5014]: I1006 23:08:28.884965 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l_7ce534c4-b596-45d1-9bf5-6573f8aa71e1/extract/0.log" Oct 06 23:08:28 crc kubenswrapper[5014]: I1006 23:08:28.947033 5014 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26tg8l_7ce534c4-b596-45d1-9bf5-6573f8aa71e1/util/0.log" Oct 06 23:08:29 crc kubenswrapper[5014]: I1006 23:08:29.054703 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-6h862_e2d1f2da-6708-4d69-ac51-4789097a2ae3/extract-utilities/0.log" Oct 06 23:08:29 crc kubenswrapper[5014]: I1006 23:08:29.193050 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-6h862_e2d1f2da-6708-4d69-ac51-4789097a2ae3/extract-content/0.log" Oct 06 23:08:29 crc kubenswrapper[5014]: I1006 23:08:29.193418 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-6h862_e2d1f2da-6708-4d69-ac51-4789097a2ae3/extract-utilities/0.log" Oct 06 23:08:29 crc kubenswrapper[5014]: I1006 23:08:29.193663 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-6h862_e2d1f2da-6708-4d69-ac51-4789097a2ae3/extract-content/0.log" Oct 06 23:08:29 crc kubenswrapper[5014]: I1006 23:08:29.396988 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-6h862_e2d1f2da-6708-4d69-ac51-4789097a2ae3/extract-utilities/0.log" Oct 06 23:08:29 crc kubenswrapper[5014]: I1006 23:08:29.425159 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-6h862_e2d1f2da-6708-4d69-ac51-4789097a2ae3/extract-content/0.log" Oct 06 23:08:29 crc kubenswrapper[5014]: I1006 23:08:29.567172 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4kg57_41881b03-6274-4b6b-a652-32bfd7ea5be3/extract-utilities/0.log" Oct 06 23:08:29 crc kubenswrapper[5014]: I1006 23:08:29.806488 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4kg57_41881b03-6274-4b6b-a652-32bfd7ea5be3/extract-utilities/0.log" Oct 06 23:08:29 crc kubenswrapper[5014]: I1006 23:08:29.882944 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4kg57_41881b03-6274-4b6b-a652-32bfd7ea5be3/extract-content/0.log" Oct 06 23:08:29 crc kubenswrapper[5014]: I1006 23:08:29.909926 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4kg57_41881b03-6274-4b6b-a652-32bfd7ea5be3/extract-content/0.log" Oct 06 23:08:30 crc kubenswrapper[5014]: I1006 23:08:30.027435 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4kg57_41881b03-6274-4b6b-a652-32bfd7ea5be3/extract-utilities/0.log" Oct 06 23:08:30 crc kubenswrapper[5014]: I1006 23:08:30.044473 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-6h862_e2d1f2da-6708-4d69-ac51-4789097a2ae3/registry-server/0.log" Oct 06 23:08:30 crc kubenswrapper[5014]: I1006 23:08:30.099370 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4kg57_41881b03-6274-4b6b-a652-32bfd7ea5be3/extract-content/0.log" Oct 06 23:08:30 crc kubenswrapper[5014]: I1006 23:08:30.238585 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf_f61ea3cc-1bff-48c1-bd62-79aaf9d617c1/util/0.log" Oct 06 23:08:30 crc kubenswrapper[5014]: I1006 23:08:30.455731 5014 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf_f61ea3cc-1bff-48c1-bd62-79aaf9d617c1/util/0.log" Oct 06 23:08:30 crc kubenswrapper[5014]: I1006 23:08:30.500666 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf_f61ea3cc-1bff-48c1-bd62-79aaf9d617c1/pull/0.log" Oct 06 23:08:30 crc kubenswrapper[5014]: I1006 23:08:30.514514 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf_f61ea3cc-1bff-48c1-bd62-79aaf9d617c1/pull/0.log" Oct 06 23:08:30 crc kubenswrapper[5014]: I1006 23:08:30.675763 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf_f61ea3cc-1bff-48c1-bd62-79aaf9d617c1/util/0.log" Oct 06 23:08:30 crc kubenswrapper[5014]: I1006 23:08:30.699202 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf_f61ea3cc-1bff-48c1-bd62-79aaf9d617c1/pull/0.log" Oct 06 23:08:30 crc kubenswrapper[5014]: I1006 23:08:30.719231 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cnwmbf_f61ea3cc-1bff-48c1-bd62-79aaf9d617c1/extract/0.log" Oct 06 23:08:30 crc kubenswrapper[5014]: I1006 23:08:30.913513 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4kg57_41881b03-6274-4b6b-a652-32bfd7ea5be3/registry-server/0.log" Oct 06 23:08:30 crc kubenswrapper[5014]: I1006 23:08:30.921760 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-fmzl2_2c999244-e7a0-42b2-9c2d-4e9a722617cd/marketplace-operator/0.log" Oct 06 23:08:31 crc kubenswrapper[5014]: I1006 23:08:31.051067 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-krdbw_0c883ea5-1a89-459e-ad85-0ba6deaf0b7e/extract-utilities/0.log" Oct 06 23:08:31 crc kubenswrapper[5014]: I1006 23:08:31.183540 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-krdbw_0c883ea5-1a89-459e-ad85-0ba6deaf0b7e/extract-utilities/0.log" Oct 06 23:08:31 crc kubenswrapper[5014]: I1006 23:08:31.191802 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-krdbw_0c883ea5-1a89-459e-ad85-0ba6deaf0b7e/extract-content/0.log" Oct 06 23:08:31 crc kubenswrapper[5014]: I1006 23:08:31.210215 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-krdbw_0c883ea5-1a89-459e-ad85-0ba6deaf0b7e/extract-content/0.log" Oct 06 23:08:31 crc kubenswrapper[5014]: I1006 23:08:31.358311 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-krdbw_0c883ea5-1a89-459e-ad85-0ba6deaf0b7e/extract-content/0.log" Oct 06 23:08:31 crc kubenswrapper[5014]: I1006 23:08:31.403310 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-krdbw_0c883ea5-1a89-459e-ad85-0ba6deaf0b7e/extract-utilities/0.log" Oct 06 23:08:31 crc kubenswrapper[5014]: I1006 23:08:31.528331 5014 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-b2m7z_85f140b3-dbc6-495e-b10a-fff600c38b58/extract-utilities/0.log" Oct 06 23:08:31 crc kubenswrapper[5014]: I1006 23:08:31.583185 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-krdbw_0c883ea5-1a89-459e-ad85-0ba6deaf0b7e/registry-server/0.log" Oct 06 23:08:31 crc kubenswrapper[5014]: I1006 23:08:31.712204 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-b2m7z_85f140b3-dbc6-495e-b10a-fff600c38b58/extract-utilities/0.log" Oct 06 23:08:31 crc kubenswrapper[5014]: I1006 23:08:31.734762 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-b2m7z_85f140b3-dbc6-495e-b10a-fff600c38b58/extract-content/0.log" Oct 06 23:08:31 crc kubenswrapper[5014]: I1006 23:08:31.750028 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-b2m7z_85f140b3-dbc6-495e-b10a-fff600c38b58/extract-content/0.log" Oct 06 23:08:31 crc kubenswrapper[5014]: I1006 23:08:31.887319 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-b2m7z_85f140b3-dbc6-495e-b10a-fff600c38b58/extract-content/0.log" Oct 06 23:08:31 crc kubenswrapper[5014]: I1006 23:08:31.887737 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-b2m7z_85f140b3-dbc6-495e-b10a-fff600c38b58/extract-utilities/0.log" Oct 06 23:08:32 crc kubenswrapper[5014]: I1006 23:08:32.628042 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-b2m7z_85f140b3-dbc6-495e-b10a-fff600c38b58/registry-server/0.log" Oct 06 23:09:02 crc kubenswrapper[5014]: E1006 23:09:02.175304 5014 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.20:56928->38.102.83.20:42859: write tcp 38.102.83.20:56928->38.102.83.20:42859: write: broken pipe Oct 06 23:09:21 crc kubenswrapper[5014]: I1006 23:09:21.735151 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 23:09:21 crc kubenswrapper[5014]: I1006 23:09:21.735972 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 23:09:41 crc kubenswrapper[5014]: I1006 23:09:41.071020 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-n2tx5"] Oct 06 23:09:41 crc kubenswrapper[5014]: I1006 23:09:41.080815 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-n2tx5"] Oct 06 23:09:41 crc kubenswrapper[5014]: I1006 23:09:41.502998 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4" path="/var/lib/kubelet/pods/ceb0fb2f-29c1-4540-9ea2-55333e9a3bb4/volumes" Oct 06 23:09:51 crc kubenswrapper[5014]: I1006 23:09:51.061767 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-6bfb-account-create-wptvp"] Oct 06 23:09:51 crc kubenswrapper[5014]: I1006 
23:09:51.071097 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-6bfb-account-create-wptvp"] Oct 06 23:09:51 crc kubenswrapper[5014]: I1006 23:09:51.511958 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="592b407e-657b-4204-af83-d4fe4508ee7e" path="/var/lib/kubelet/pods/592b407e-657b-4204-af83-d4fe4508ee7e/volumes" Oct 06 23:09:51 crc kubenswrapper[5014]: I1006 23:09:51.735749 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 23:09:51 crc kubenswrapper[5014]: I1006 23:09:51.735815 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 23:09:58 crc kubenswrapper[5014]: I1006 23:09:58.042765 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-j9dm7"] Oct 06 23:09:58 crc kubenswrapper[5014]: I1006 23:09:58.066015 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-j9dm7"] Oct 06 23:09:59 crc kubenswrapper[5014]: I1006 23:09:59.505461 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3c29595-ff94-4ee2-a182-29445fe5d2ad" path="/var/lib/kubelet/pods/c3c29595-ff94-4ee2-a182-29445fe5d2ad/volumes" Oct 06 23:10:04 crc kubenswrapper[5014]: I1006 23:10:04.367223 5014 scope.go:117] "RemoveContainer" containerID="4298df35e3c863a6fd0801b4d5d65657f1f965f3684606edda11c19e512401b8" Oct 06 23:10:04 crc kubenswrapper[5014]: I1006 23:10:04.433476 5014 scope.go:117] "RemoveContainer" containerID="d7a88f93d3d4e108fbc1c5ffa2ef3113212acd44a8b4989c9615c28e069e4636" Oct 06 23:10:04 crc kubenswrapper[5014]: I1006 23:10:04.476092 5014 scope.go:117] "RemoveContainer" containerID="1addd1103523de258e0c26388b5b4aa83b99bcb56ae3b41b45eabe8dede008cb" Oct 06 23:10:11 crc kubenswrapper[5014]: I1006 23:10:11.042681 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-cjxq2"] Oct 06 23:10:11 crc kubenswrapper[5014]: I1006 23:10:11.055856 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-cjxq2"] Oct 06 23:10:11 crc kubenswrapper[5014]: I1006 23:10:11.500766 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1cf816d0-0a38-4417-91a3-be05870aa60d" path="/var/lib/kubelet/pods/1cf816d0-0a38-4417-91a3-be05870aa60d/volumes" Oct 06 23:10:13 crc kubenswrapper[5014]: I1006 23:10:13.101063 5014 generic.go:334] "Generic (PLEG): container finished" podID="37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59" containerID="31d0dd82677de620590608d7659fb0a431036e9d7c0f3553decc48d6858ab0cd" exitCode=0 Oct 06 23:10:13 crc kubenswrapper[5014]: I1006 23:10:13.101147 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-w49rt/must-gather-9jfkz" event={"ID":"37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59","Type":"ContainerDied","Data":"31d0dd82677de620590608d7659fb0a431036e9d7c0f3553decc48d6858ab0cd"} Oct 06 23:10:13 crc kubenswrapper[5014]: I1006 23:10:13.102111 5014 scope.go:117] "RemoveContainer" containerID="31d0dd82677de620590608d7659fb0a431036e9d7c0f3553decc48d6858ab0cd" Oct 06 
23:10:13 crc kubenswrapper[5014]: I1006 23:10:13.218736 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-w49rt_must-gather-9jfkz_37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59/gather/0.log" Oct 06 23:10:20 crc kubenswrapper[5014]: I1006 23:10:20.607725 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-w49rt/must-gather-9jfkz"] Oct 06 23:10:20 crc kubenswrapper[5014]: I1006 23:10:20.608465 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-w49rt/must-gather-9jfkz" podUID="37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59" containerName="copy" containerID="cri-o://1b18d7ee9413fd18d6b02b039b21e66bbffd7e81f17e60a2f68177d33adfdff2" gracePeriod=2 Oct 06 23:10:20 crc kubenswrapper[5014]: I1006 23:10:20.619303 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-w49rt/must-gather-9jfkz"] Oct 06 23:10:20 crc kubenswrapper[5014]: I1006 23:10:20.995967 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-w49rt_must-gather-9jfkz_37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59/copy/0.log" Oct 06 23:10:20 crc kubenswrapper[5014]: I1006 23:10:20.996667 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w49rt/must-gather-9jfkz" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.029848 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xhk8s\" (UniqueName: \"kubernetes.io/projected/37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59-kube-api-access-xhk8s\") pod \"37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59\" (UID: \"37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59\") " Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.029898 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59-must-gather-output\") pod \"37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59\" (UID: \"37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59\") " Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.035548 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59-kube-api-access-xhk8s" (OuterVolumeSpecName: "kube-api-access-xhk8s") pod "37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59" (UID: "37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59"). InnerVolumeSpecName "kube-api-access-xhk8s". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.131427 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xhk8s\" (UniqueName: \"kubernetes.io/projected/37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59-kube-api-access-xhk8s\") on node \"crc\" DevicePath \"\"" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.143406 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59" (UID: "37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.192018 5014 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-w49rt_must-gather-9jfkz_37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59/copy/0.log" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.192470 5014 generic.go:334] "Generic (PLEG): container finished" podID="37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59" containerID="1b18d7ee9413fd18d6b02b039b21e66bbffd7e81f17e60a2f68177d33adfdff2" exitCode=143 Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.192531 5014 scope.go:117] "RemoveContainer" containerID="1b18d7ee9413fd18d6b02b039b21e66bbffd7e81f17e60a2f68177d33adfdff2" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.192699 5014 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-w49rt/must-gather-9jfkz" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.228308 5014 scope.go:117] "RemoveContainer" containerID="31d0dd82677de620590608d7659fb0a431036e9d7c0f3553decc48d6858ab0cd" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.233990 5014 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59-must-gather-output\") on node \"crc\" DevicePath \"\"" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.287946 5014 scope.go:117] "RemoveContainer" containerID="1b18d7ee9413fd18d6b02b039b21e66bbffd7e81f17e60a2f68177d33adfdff2" Oct 06 23:10:21 crc kubenswrapper[5014]: E1006 23:10:21.288390 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b18d7ee9413fd18d6b02b039b21e66bbffd7e81f17e60a2f68177d33adfdff2\": container with ID starting with 1b18d7ee9413fd18d6b02b039b21e66bbffd7e81f17e60a2f68177d33adfdff2 not found: ID does not exist" containerID="1b18d7ee9413fd18d6b02b039b21e66bbffd7e81f17e60a2f68177d33adfdff2" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.288423 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b18d7ee9413fd18d6b02b039b21e66bbffd7e81f17e60a2f68177d33adfdff2"} err="failed to get container status \"1b18d7ee9413fd18d6b02b039b21e66bbffd7e81f17e60a2f68177d33adfdff2\": rpc error: code = NotFound desc = could not find container \"1b18d7ee9413fd18d6b02b039b21e66bbffd7e81f17e60a2f68177d33adfdff2\": container with ID starting with 1b18d7ee9413fd18d6b02b039b21e66bbffd7e81f17e60a2f68177d33adfdff2 not found: ID does not exist" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.288443 5014 scope.go:117] "RemoveContainer" containerID="31d0dd82677de620590608d7659fb0a431036e9d7c0f3553decc48d6858ab0cd" Oct 06 23:10:21 crc kubenswrapper[5014]: E1006 23:10:21.288977 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31d0dd82677de620590608d7659fb0a431036e9d7c0f3553decc48d6858ab0cd\": container with ID starting with 31d0dd82677de620590608d7659fb0a431036e9d7c0f3553decc48d6858ab0cd not found: ID does not exist" containerID="31d0dd82677de620590608d7659fb0a431036e9d7c0f3553decc48d6858ab0cd" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.289018 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31d0dd82677de620590608d7659fb0a431036e9d7c0f3553decc48d6858ab0cd"} err="failed to get container status 
\"31d0dd82677de620590608d7659fb0a431036e9d7c0f3553decc48d6858ab0cd\": rpc error: code = NotFound desc = could not find container \"31d0dd82677de620590608d7659fb0a431036e9d7c0f3553decc48d6858ab0cd\": container with ID starting with 31d0dd82677de620590608d7659fb0a431036e9d7c0f3553decc48d6858ab0cd not found: ID does not exist" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.498037 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59" path="/var/lib/kubelet/pods/37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59/volumes" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.735574 5014 patch_prober.go:28] interesting pod/machine-config-daemon-6bths container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.735646 5014 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.735695 5014 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-6bths" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.736402 5014 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d31155bd7b4769b3eedfded0dec5a850227452b5b5684203cfea03e6550fc584"} pod="openshift-machine-config-operator/machine-config-daemon-6bths" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 06 23:10:21 crc kubenswrapper[5014]: I1006 23:10:21.736462 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerName="machine-config-daemon" containerID="cri-o://d31155bd7b4769b3eedfded0dec5a850227452b5b5684203cfea03e6550fc584" gracePeriod=600 Oct 06 23:10:21 crc kubenswrapper[5014]: E1006 23:10:21.860534 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:10:22 crc kubenswrapper[5014]: I1006 23:10:22.204449 5014 generic.go:334] "Generic (PLEG): container finished" podID="33478e0f-9143-4e11-96a1-04c53f0f6277" containerID="d31155bd7b4769b3eedfded0dec5a850227452b5b5684203cfea03e6550fc584" exitCode=0 Oct 06 23:10:22 crc kubenswrapper[5014]: I1006 23:10:22.204568 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-6bths" event={"ID":"33478e0f-9143-4e11-96a1-04c53f0f6277","Type":"ContainerDied","Data":"d31155bd7b4769b3eedfded0dec5a850227452b5b5684203cfea03e6550fc584"} Oct 06 23:10:22 crc kubenswrapper[5014]: I1006 23:10:22.205105 5014 scope.go:117] "RemoveContainer" 
containerID="b714fdc15c24e51123635f494a6a27ab187d08603b236a717c87633ed54de216" Oct 06 23:10:22 crc kubenswrapper[5014]: I1006 23:10:22.206209 5014 scope.go:117] "RemoveContainer" containerID="d31155bd7b4769b3eedfded0dec5a850227452b5b5684203cfea03e6550fc584" Oct 06 23:10:22 crc kubenswrapper[5014]: E1006 23:10:22.206702 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:10:33 crc kubenswrapper[5014]: I1006 23:10:33.484893 5014 scope.go:117] "RemoveContainer" containerID="d31155bd7b4769b3eedfded0dec5a850227452b5b5684203cfea03e6550fc584" Oct 06 23:10:33 crc kubenswrapper[5014]: E1006 23:10:33.486115 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.043043 5014 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ltxmx"] Oct 06 23:10:46 crc kubenswrapper[5014]: E1006 23:10:46.044369 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59" containerName="gather" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.044390 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59" containerName="gather" Oct 06 23:10:46 crc kubenswrapper[5014]: E1006 23:10:46.044425 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59" containerName="copy" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.044437 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59" containerName="copy" Oct 06 23:10:46 crc kubenswrapper[5014]: E1006 23:10:46.044480 5014 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9359097f-ab2e-483f-a80e-2fe21fa38590" containerName="container-00" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.044493 5014 state_mem.go:107] "Deleted CPUSet assignment" podUID="9359097f-ab2e-483f-a80e-2fe21fa38590" containerName="container-00" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.044833 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59" containerName="copy" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.044893 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="37e64fc6-8b17-4b93-a41d-0a1c3dc3ea59" containerName="gather" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.044924 5014 memory_manager.go:354] "RemoveStaleState removing state" podUID="9359097f-ab2e-483f-a80e-2fe21fa38590" containerName="container-00" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.047524 5014 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.055001 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ltxmx"] Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.133067 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t82zs\" (UniqueName: \"kubernetes.io/projected/07fc66ba-4920-4db5-86b1-bbf857eb39d8-kube-api-access-t82zs\") pod \"certified-operators-ltxmx\" (UID: \"07fc66ba-4920-4db5-86b1-bbf857eb39d8\") " pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.133300 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07fc66ba-4920-4db5-86b1-bbf857eb39d8-catalog-content\") pod \"certified-operators-ltxmx\" (UID: \"07fc66ba-4920-4db5-86b1-bbf857eb39d8\") " pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.133361 5014 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07fc66ba-4920-4db5-86b1-bbf857eb39d8-utilities\") pod \"certified-operators-ltxmx\" (UID: \"07fc66ba-4920-4db5-86b1-bbf857eb39d8\") " pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.235199 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t82zs\" (UniqueName: \"kubernetes.io/projected/07fc66ba-4920-4db5-86b1-bbf857eb39d8-kube-api-access-t82zs\") pod \"certified-operators-ltxmx\" (UID: \"07fc66ba-4920-4db5-86b1-bbf857eb39d8\") " pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.235435 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07fc66ba-4920-4db5-86b1-bbf857eb39d8-catalog-content\") pod \"certified-operators-ltxmx\" (UID: \"07fc66ba-4920-4db5-86b1-bbf857eb39d8\") " pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.236045 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07fc66ba-4920-4db5-86b1-bbf857eb39d8-catalog-content\") pod \"certified-operators-ltxmx\" (UID: \"07fc66ba-4920-4db5-86b1-bbf857eb39d8\") " pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.236163 5014 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07fc66ba-4920-4db5-86b1-bbf857eb39d8-utilities\") pod \"certified-operators-ltxmx\" (UID: \"07fc66ba-4920-4db5-86b1-bbf857eb39d8\") " pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.236491 5014 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07fc66ba-4920-4db5-86b1-bbf857eb39d8-utilities\") pod \"certified-operators-ltxmx\" (UID: \"07fc66ba-4920-4db5-86b1-bbf857eb39d8\") " pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.273244 5014 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-t82zs\" (UniqueName: \"kubernetes.io/projected/07fc66ba-4920-4db5-86b1-bbf857eb39d8-kube-api-access-t82zs\") pod \"certified-operators-ltxmx\" (UID: \"07fc66ba-4920-4db5-86b1-bbf857eb39d8\") " pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.401993 5014 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.484348 5014 scope.go:117] "RemoveContainer" containerID="d31155bd7b4769b3eedfded0dec5a850227452b5b5684203cfea03e6550fc584" Oct 06 23:10:46 crc kubenswrapper[5014]: E1006 23:10:46.484610 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277" Oct 06 23:10:46 crc kubenswrapper[5014]: I1006 23:10:46.907585 5014 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ltxmx"] Oct 06 23:10:46 crc kubenswrapper[5014]: W1006 23:10:46.918591 5014 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod07fc66ba_4920_4db5_86b1_bbf857eb39d8.slice/crio-8ad3ad022cfdb8ca7ea90e1078483ff0101c4617c2a70ca67cb3438eb3aafa8e WatchSource:0}: Error finding container 8ad3ad022cfdb8ca7ea90e1078483ff0101c4617c2a70ca67cb3438eb3aafa8e: Status 404 returned error can't find the container with id 8ad3ad022cfdb8ca7ea90e1078483ff0101c4617c2a70ca67cb3438eb3aafa8e Oct 06 23:10:47 crc kubenswrapper[5014]: I1006 23:10:47.524266 5014 generic.go:334] "Generic (PLEG): container finished" podID="07fc66ba-4920-4db5-86b1-bbf857eb39d8" containerID="34e1107b0b994a655c5c8acf128972248d745d816e080d1d0769fc36febba5f6" exitCode=0 Oct 06 23:10:47 crc kubenswrapper[5014]: I1006 23:10:47.524464 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ltxmx" event={"ID":"07fc66ba-4920-4db5-86b1-bbf857eb39d8","Type":"ContainerDied","Data":"34e1107b0b994a655c5c8acf128972248d745d816e080d1d0769fc36febba5f6"} Oct 06 23:10:47 crc kubenswrapper[5014]: I1006 23:10:47.524670 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ltxmx" event={"ID":"07fc66ba-4920-4db5-86b1-bbf857eb39d8","Type":"ContainerStarted","Data":"8ad3ad022cfdb8ca7ea90e1078483ff0101c4617c2a70ca67cb3438eb3aafa8e"} Oct 06 23:10:47 crc kubenswrapper[5014]: I1006 23:10:47.526433 5014 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 06 23:10:49 crc kubenswrapper[5014]: I1006 23:10:49.556926 5014 generic.go:334] "Generic (PLEG): container finished" podID="07fc66ba-4920-4db5-86b1-bbf857eb39d8" containerID="a1b5f6ce93f1e8ff6d83d6cf74e389f4efda2fbaf1c57038e9400c163d65d05e" exitCode=0 Oct 06 23:10:49 crc kubenswrapper[5014]: I1006 23:10:49.556984 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ltxmx" event={"ID":"07fc66ba-4920-4db5-86b1-bbf857eb39d8","Type":"ContainerDied","Data":"a1b5f6ce93f1e8ff6d83d6cf74e389f4efda2fbaf1c57038e9400c163d65d05e"} Oct 06 23:10:51 crc 
kubenswrapper[5014]: I1006 23:10:51.581468 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ltxmx" event={"ID":"07fc66ba-4920-4db5-86b1-bbf857eb39d8","Type":"ContainerStarted","Data":"f7a6817b137e0ac178b7b0e1720939bc8487d0fa77ce8c71b013f19abdae785e"} Oct 06 23:10:51 crc kubenswrapper[5014]: I1006 23:10:51.604397 5014 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ltxmx" podStartSLOduration=3.6853982739999998 podStartE2EDuration="6.604376316s" podCreationTimestamp="2025-10-06 23:10:45 +0000 UTC" firstStartedPulling="2025-10-06 23:10:47.526235885 +0000 UTC m=+5992.819272619" lastFinishedPulling="2025-10-06 23:10:50.445213887 +0000 UTC m=+5995.738250661" observedRunningTime="2025-10-06 23:10:51.602925582 +0000 UTC m=+5996.895962346" watchObservedRunningTime="2025-10-06 23:10:51.604376316 +0000 UTC m=+5996.897413060" Oct 06 23:10:56 crc kubenswrapper[5014]: I1006 23:10:56.402579 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:56 crc kubenswrapper[5014]: I1006 23:10:56.403243 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:56 crc kubenswrapper[5014]: I1006 23:10:56.489543 5014 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:56 crc kubenswrapper[5014]: I1006 23:10:56.704663 5014 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:56 crc kubenswrapper[5014]: I1006 23:10:56.764021 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ltxmx"] Oct 06 23:10:58 crc kubenswrapper[5014]: I1006 23:10:58.674899 5014 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ltxmx" podUID="07fc66ba-4920-4db5-86b1-bbf857eb39d8" containerName="registry-server" containerID="cri-o://f7a6817b137e0ac178b7b0e1720939bc8487d0fa77ce8c71b013f19abdae785e" gracePeriod=2 Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.329667 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.501719 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07fc66ba-4920-4db5-86b1-bbf857eb39d8-utilities\") pod \"07fc66ba-4920-4db5-86b1-bbf857eb39d8\" (UID: \"07fc66ba-4920-4db5-86b1-bbf857eb39d8\") " Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.501925 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t82zs\" (UniqueName: \"kubernetes.io/projected/07fc66ba-4920-4db5-86b1-bbf857eb39d8-kube-api-access-t82zs\") pod \"07fc66ba-4920-4db5-86b1-bbf857eb39d8\" (UID: \"07fc66ba-4920-4db5-86b1-bbf857eb39d8\") " Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.502576 5014 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07fc66ba-4920-4db5-86b1-bbf857eb39d8-catalog-content\") pod \"07fc66ba-4920-4db5-86b1-bbf857eb39d8\" (UID: \"07fc66ba-4920-4db5-86b1-bbf857eb39d8\") " Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.503309 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07fc66ba-4920-4db5-86b1-bbf857eb39d8-utilities" (OuterVolumeSpecName: "utilities") pod "07fc66ba-4920-4db5-86b1-bbf857eb39d8" (UID: "07fc66ba-4920-4db5-86b1-bbf857eb39d8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.507969 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07fc66ba-4920-4db5-86b1-bbf857eb39d8-kube-api-access-t82zs" (OuterVolumeSpecName: "kube-api-access-t82zs") pod "07fc66ba-4920-4db5-86b1-bbf857eb39d8" (UID: "07fc66ba-4920-4db5-86b1-bbf857eb39d8"). InnerVolumeSpecName "kube-api-access-t82zs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.605285 5014 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07fc66ba-4920-4db5-86b1-bbf857eb39d8-utilities\") on node \"crc\" DevicePath \"\"" Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.605323 5014 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t82zs\" (UniqueName: \"kubernetes.io/projected/07fc66ba-4920-4db5-86b1-bbf857eb39d8-kube-api-access-t82zs\") on node \"crc\" DevicePath \"\"" Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.688009 5014 generic.go:334] "Generic (PLEG): container finished" podID="07fc66ba-4920-4db5-86b1-bbf857eb39d8" containerID="f7a6817b137e0ac178b7b0e1720939bc8487d0fa77ce8c71b013f19abdae785e" exitCode=0 Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.688056 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ltxmx" event={"ID":"07fc66ba-4920-4db5-86b1-bbf857eb39d8","Type":"ContainerDied","Data":"f7a6817b137e0ac178b7b0e1720939bc8487d0fa77ce8c71b013f19abdae785e"} Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.688085 5014 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ltxmx" event={"ID":"07fc66ba-4920-4db5-86b1-bbf857eb39d8","Type":"ContainerDied","Data":"8ad3ad022cfdb8ca7ea90e1078483ff0101c4617c2a70ca67cb3438eb3aafa8e"} Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.688108 5014 scope.go:117] "RemoveContainer" containerID="f7a6817b137e0ac178b7b0e1720939bc8487d0fa77ce8c71b013f19abdae785e" Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.688218 5014 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ltxmx" Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.715818 5014 scope.go:117] "RemoveContainer" containerID="a1b5f6ce93f1e8ff6d83d6cf74e389f4efda2fbaf1c57038e9400c163d65d05e" Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.745324 5014 scope.go:117] "RemoveContainer" containerID="34e1107b0b994a655c5c8acf128972248d745d816e080d1d0769fc36febba5f6" Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.799520 5014 scope.go:117] "RemoveContainer" containerID="f7a6817b137e0ac178b7b0e1720939bc8487d0fa77ce8c71b013f19abdae785e" Oct 06 23:10:59 crc kubenswrapper[5014]: E1006 23:10:59.800038 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7a6817b137e0ac178b7b0e1720939bc8487d0fa77ce8c71b013f19abdae785e\": container with ID starting with f7a6817b137e0ac178b7b0e1720939bc8487d0fa77ce8c71b013f19abdae785e not found: ID does not exist" containerID="f7a6817b137e0ac178b7b0e1720939bc8487d0fa77ce8c71b013f19abdae785e" Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.800122 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7a6817b137e0ac178b7b0e1720939bc8487d0fa77ce8c71b013f19abdae785e"} err="failed to get container status \"f7a6817b137e0ac178b7b0e1720939bc8487d0fa77ce8c71b013f19abdae785e\": rpc error: code = NotFound desc = could not find container \"f7a6817b137e0ac178b7b0e1720939bc8487d0fa77ce8c71b013f19abdae785e\": container with ID starting with f7a6817b137e0ac178b7b0e1720939bc8487d0fa77ce8c71b013f19abdae785e not found: ID does not exist" Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.800176 5014 scope.go:117] "RemoveContainer" containerID="a1b5f6ce93f1e8ff6d83d6cf74e389f4efda2fbaf1c57038e9400c163d65d05e" Oct 06 23:10:59 crc kubenswrapper[5014]: E1006 23:10:59.802514 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1b5f6ce93f1e8ff6d83d6cf74e389f4efda2fbaf1c57038e9400c163d65d05e\": container with ID starting with a1b5f6ce93f1e8ff6d83d6cf74e389f4efda2fbaf1c57038e9400c163d65d05e not found: ID does not exist" containerID="a1b5f6ce93f1e8ff6d83d6cf74e389f4efda2fbaf1c57038e9400c163d65d05e" Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.802570 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1b5f6ce93f1e8ff6d83d6cf74e389f4efda2fbaf1c57038e9400c163d65d05e"} err="failed to get container status \"a1b5f6ce93f1e8ff6d83d6cf74e389f4efda2fbaf1c57038e9400c163d65d05e\": rpc error: code = NotFound desc = could not find container \"a1b5f6ce93f1e8ff6d83d6cf74e389f4efda2fbaf1c57038e9400c163d65d05e\": container with ID starting with a1b5f6ce93f1e8ff6d83d6cf74e389f4efda2fbaf1c57038e9400c163d65d05e not found: ID does not exist" Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.802612 5014 scope.go:117] "RemoveContainer" containerID="34e1107b0b994a655c5c8acf128972248d745d816e080d1d0769fc36febba5f6" Oct 06 23:10:59 crc kubenswrapper[5014]: E1006 23:10:59.803179 5014 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34e1107b0b994a655c5c8acf128972248d745d816e080d1d0769fc36febba5f6\": container with ID starting with 34e1107b0b994a655c5c8acf128972248d745d816e080d1d0769fc36febba5f6 not found: ID does not exist" containerID="34e1107b0b994a655c5c8acf128972248d745d816e080d1d0769fc36febba5f6" 
Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.803236 5014 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34e1107b0b994a655c5c8acf128972248d745d816e080d1d0769fc36febba5f6"} err="failed to get container status \"34e1107b0b994a655c5c8acf128972248d745d816e080d1d0769fc36febba5f6\": rpc error: code = NotFound desc = could not find container \"34e1107b0b994a655c5c8acf128972248d745d816e080d1d0769fc36febba5f6\": container with ID starting with 34e1107b0b994a655c5c8acf128972248d745d816e080d1d0769fc36febba5f6 not found: ID does not exist"
Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.902791 5014 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07fc66ba-4920-4db5-86b1-bbf857eb39d8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "07fc66ba-4920-4db5-86b1-bbf857eb39d8" (UID: "07fc66ba-4920-4db5-86b1-bbf857eb39d8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 06 23:10:59 crc kubenswrapper[5014]: I1006 23:10:59.911309 5014 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07fc66ba-4920-4db5-86b1-bbf857eb39d8-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 06 23:11:00 crc kubenswrapper[5014]: I1006 23:11:00.036223 5014 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ltxmx"]
Oct 06 23:11:00 crc kubenswrapper[5014]: I1006 23:11:00.049997 5014 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ltxmx"]
Oct 06 23:11:00 crc kubenswrapper[5014]: I1006 23:11:00.484966 5014 scope.go:117] "RemoveContainer" containerID="d31155bd7b4769b3eedfded0dec5a850227452b5b5684203cfea03e6550fc584"
Oct 06 23:11:00 crc kubenswrapper[5014]: E1006 23:11:00.485354 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 23:11:01 crc kubenswrapper[5014]: I1006 23:11:01.501656 5014 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07fc66ba-4920-4db5-86b1-bbf857eb39d8" path="/var/lib/kubelet/pods/07fc66ba-4920-4db5-86b1-bbf857eb39d8/volumes"
Oct 06 23:11:04 crc kubenswrapper[5014]: I1006 23:11:04.626450 5014 scope.go:117] "RemoveContainer" containerID="d2f1514b1e8722df8292b25012388ed4e1f253e2299f2e46fe48fc04d83f4345"
Oct 06 23:11:14 crc kubenswrapper[5014]: I1006 23:11:14.485034 5014 scope.go:117] "RemoveContainer" containerID="d31155bd7b4769b3eedfded0dec5a850227452b5b5684203cfea03e6550fc584"
Oct 06 23:11:14 crc kubenswrapper[5014]: E1006 23:11:14.485946 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 23:11:25 crc kubenswrapper[5014]: I1006 23:11:25.489291 5014 scope.go:117] "RemoveContainer" containerID="d31155bd7b4769b3eedfded0dec5a850227452b5b5684203cfea03e6550fc584"
Oct 06 23:11:25 crc kubenswrapper[5014]: E1006 23:11:25.490366 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 23:11:40 crc kubenswrapper[5014]: I1006 23:11:40.485403 5014 scope.go:117] "RemoveContainer" containerID="d31155bd7b4769b3eedfded0dec5a850227452b5b5684203cfea03e6550fc584"
Oct 06 23:11:40 crc kubenswrapper[5014]: E1006 23:11:40.486303 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 23:11:53 crc kubenswrapper[5014]: I1006 23:11:53.485019 5014 scope.go:117] "RemoveContainer" containerID="d31155bd7b4769b3eedfded0dec5a850227452b5b5684203cfea03e6550fc584"
Oct 06 23:11:53 crc kubenswrapper[5014]: E1006 23:11:53.485900 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
Oct 06 23:12:04 crc kubenswrapper[5014]: I1006 23:12:04.778510 5014 scope.go:117] "RemoveContainer" containerID="2e089cfd2603d3f672be58d54b59a18b573100e841847416a340e470ac7501b3"
Oct 06 23:12:06 crc kubenswrapper[5014]: I1006 23:12:06.484807 5014 scope.go:117] "RemoveContainer" containerID="d31155bd7b4769b3eedfded0dec5a850227452b5b5684203cfea03e6550fc584"
Oct 06 23:12:06 crc kubenswrapper[5014]: E1006 23:12:06.485502 5014 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-6bths_openshift-machine-config-operator(33478e0f-9143-4e11-96a1-04c53f0f6277)\"" pod="openshift-machine-config-operator/machine-config-daemon-6bths" podUID="33478e0f-9143-4e11-96a1-04c53f0f6277"
var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515071046321024444 0ustar coreroot
var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015071046321017361 5ustar coreroot
var/home/core/zuul-output/artifacts/0000755000175000017500000000000015071032065016504 5ustar corecore
var/home/core/zuul-output/docs/0000755000175000017500000000000015071032066015455 5ustar corecore